small bug fixes

This commit is contained in:
Yousef Rafat 2025-10-10 20:35:52 +03:00
parent e684ff2505
commit 89fc51fb91
3 changed files with 7 additions and 5 deletions

View File

@@ -15,7 +15,7 @@ class SiglipMultiheadAttentionPoolingHead(torch.nn.Module):
batch_size = hidden_state.shape[0]
probe = self.probe.repeat(batch_size, 1, 1)
hidden_state = self.attention(probe, hidden_state, hidden_state)[0]
hidden_state = self.attention(probe, hidden_state, hidden_state)
residual = hidden_state
hidden_state = self.layernorm(hidden_state)

View File

@@ -1164,12 +1164,14 @@ class MultiheadAttentionComfyv(nn.Module):
error_msgs,
)
def forward(self, src, attn_mask = None, key_padding_mask = None):
def forward(self, src, k = None, v = None, attn_mask = None, key_padding_mask = None):
self._q_proj, self._k_proj, self._v_proj = [t.to(src.device).to(src.dtype) for t in (self._q_proj, self._k_proj, self._v_proj)]
q = self._q_proj(src)
k = self._k_proj(src)
v = self._v_proj(src)
if k is None:
k = self._k_proj(src)
if v is None:
v = self._v_proj(src)
output = optimized_attention(q, k, v, self.num_heads, mask = attn_mask)
return self.out_proj(output)

View File

@@ -97,7 +97,7 @@ class EncodeVideo(io.ComfyNode):
except:
out = model.encode(chunk)
else:
out = vae.encode_image(chunk)
out = vae.encode_image(chunk, crop=False)
out = out["image_embeds"]
out_cpu = out.cpu()