Fix issue with batch_size > 1 on some models. (#12892)

This commit is contained in:
comfyanonymous 2026-03-11 13:37:31 -07:00 committed by GitHub
parent 4f4f8659c2
commit f6274c06b4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -144,9 +144,9 @@ def apply_mod(tensor, m_mult, m_add=None, modulation_dims=None):
             return tensor * m_mult
     else:
         for d in modulation_dims:
-            tensor[:, d[0]:d[1]] *= m_mult[:, d[2]]
+            tensor[:, d[0]:d[1]] *= m_mult[:, d[2]:d[2] + 1]
             if m_add is not None:
-                tensor[:, d[0]:d[1]] += m_add[:, d[2]]
+                tensor[:, d[0]:d[1]] += m_add[:, d[2]:d[2] + 1]
         return tensor