From 43de98b30e294c6067272124996a88412419ae73 Mon Sep 17 00:00:00 2001
From: vivienfanghua
Date: Wed, 29 Oct 2025 10:46:17 +0800
Subject: [PATCH] add

---
 comfy/ldm/modules/attention.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index f9459c2dc..1f9c56b34 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -621,6 +621,7 @@ def attention_flash(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
     except Exception as e:
         logging.warning(f"Flash Attention failed, using default SDPA: {e}")
         out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
+
     if not skip_output_reshape:
         out = (
             out.transpose(1, 2).reshape(b, -1, heads * dim_head)
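
For context, the hunk above sits in the fallback path of attention_flash: try Flash Attention, fall back to torch's scaled_dot_product_attention on failure, then optionally merge the heads back into the channel dimension. Below is a minimal, self-contained sketch of that pattern, not ComfyUI's exact implementation: the function name attention_with_fallback is made up, the direct flash_attn_func import stands in for ComfyUI's own wrapper, and inputs are assumed to arrive as (batch, seq_len, heads * dim_head).

import logging

import torch


def attention_with_fallback(q, k, v, heads, mask=None, skip_output_reshape=False):
    b, _, inner_dim = q.shape
    dim_head = inner_dim // heads
    # Split heads out: (batch, heads, seq_len, dim_head), the layout SDPA expects.
    q, k, v = (t.view(b, -1, heads, dim_head).transpose(1, 2) for t in (q, k, v))

    try:
        if mask is not None:
            # The flash path in this sketch does not handle attention masks.
            raise ValueError("mask not supported by the flash path")
        from flash_attn import flash_attn_func  # optional dependency

        # flash_attn_func takes and returns (batch, seq_len, heads, dim_head).
        out = flash_attn_func(
            q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2),
            dropout_p=0.0, causal=False,
        ).transpose(1, 2)
    except Exception as e:
        logging.warning(f"Flash Attention failed, using default SDPA: {e}")
        out = torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)

    if not skip_output_reshape:
        # Merge heads back: (batch, seq_len, heads * dim_head).
        out = out.transpose(1, 2).reshape(b, -1, heads * dim_head)
    return out

Both branches leave out in (batch, heads, seq_len, dim_head), so the final reshape applies uniformly regardless of which attention backend actually ran.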