From 62f6a1a1da2bc555c1f63ddca338b8c313203309 Mon Sep 17 00:00:00 2001
From: InconsolableCellist <23345188+InconsolableCellist@users.noreply.github.com>
Date: Sun, 30 Apr 2023 17:31:55 -0600
Subject: [PATCH] Fixing bug where the attentions were mixed together even
 when the weight was 0.0

---
 comfy/ldm/modules/sub_quadratic_attention.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py
index 981b85da2..64b743fb6 100644
--- a/comfy/ldm/modules/sub_quadratic_attention.py
+++ b/comfy/ldm/modules/sub_quadratic_attention.py
@@ -176,10 +176,10 @@ def _get_attention_scores_no_kv_chunking(
     attn_scores /= summed
     attn_probs = attn_scores
 
-    if attention_to_mux is not None:
+    if attention_to_mux is not None and attention_weight > 0.0:
         attention_to_mux = attention_to_mux.to(attn_probs.device)
         attn_probs = attn_probs * (1 - attention_weight) + attention_to_mux * attention_weight
-        print(f"muxed attention with weight {attention_weight}")
+        # print(f"muxed attention with weight {attention_weight}")
 
     hidden_states_slice = torch.bmm(attn_probs, value)
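
Note: before this patch the branch ran whenever attention_to_mux was set, so
even at attention_weight == 0.0 it still paid for the cross-device copy and the
elementwise blend, and any non-finite values in attention_to_mux would survive
into attn_probs (0.0 * NaN is NaN under IEEE 754, so a weight of 0.0 is not a
guaranteed no-op). Below is a minimal standalone sketch of the guarded logic
for illustration; the names follow the diff above, but the helper
mux_attention itself is hypothetical and not part of the patched file.

    from typing import Optional

    import torch


    def mux_attention(attn_probs: torch.Tensor,
                      attention_to_mux: Optional[torch.Tensor],
                      attention_weight: float) -> torch.Tensor:
        # Skip the blend entirely when there is nothing to mix in: this
        # avoids the cross-device copy and the elementwise math that the
        # pre-patch code performed even at weight 0.0.
        if attention_to_mux is None or attention_weight <= 0.0:
            return attn_probs
        # Move the secondary attention map to the same device before mixing.
        attention_to_mux = attention_to_mux.to(attn_probs.device)
        # Linear interpolation between the two attention maps.
        return attn_probs * (1 - attention_weight) + attention_to_mux * attention_weight

Guarding on attention_weight > 0.0 makes weight 0.0 an exact no-op by
construction, rather than relying on the floating-point identity
x * 1.0 + y * 0.0 == x, which fails when y contains NaN or Inf.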