From a0690f9df9e731ff31fb9b0d64f1fe7cbc918789 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 21 Oct 2023 20:31:24 -0400
Subject: [PATCH 1/4] Fix t2i adapter issue.

---
 comfy/controlnet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index 73a40acfa..f1355e64e 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -416,7 +416,7 @@ class T2IAdapter(ControlBase):
                 if control_prev is not None:
                     return control_prev
                 else:
-                    return {}
+                    return None
 
         if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]:
             if self.cond_hint is not None:

From 8cfce083c4eb09ea95bce59f65f1634e09d12b13 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 21 Oct 2023 22:36:04 -0400
Subject: [PATCH 2/4] Fix primitive node control value not getting loaded.

---
 web/extensions/core/widgetInputs.js | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js
index 84abd8b7d..bad3ac3a7 100644
--- a/web/extensions/core/widgetInputs.js
+++ b/web/extensions/core/widgetInputs.js
@@ -463,7 +463,11 @@ app.registerExtension({
 			}
 
 			if (widget.type === "number" || widget.type === "combo") {
-				addValueControlWidget(this, widget, "fixed");
+				let control_value = this.widgets_values?.[1];
+				if (!control_value) {
+					control_value = "fixed";
+				}
+				addValueControlWidget(this, widget, control_value);
 			}
 
 			// When our value changes, update other widgets to reflect our changes

From e6bc42df4662e571365ffbafe7c2dfac2cee3116 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 22 Oct 2023 03:51:29 -0400
Subject: [PATCH 3/4] Make sub_quad and split work with hypertile.

---
 comfy/ldm/modules/attention.py | 41 +++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 4eda361f3..f8391e19a 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -124,11 +124,14 @@ def attention_basic(q, k, v, heads, mask=None):
 
 
 def attention_sub_quad(query, key, value, heads, mask=None):
-    scale = (query.shape[-1] // heads) ** -0.5
-    query = query.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1)
-    key_t = key.transpose(1,2).unflatten(1, (heads, -1)).flatten(end_dim=1)
-    del key
-    value = value.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1)
+    b, _, dim_head = query.shape
+    dim_head //= heads
+
+    scale = dim_head ** -0.5
+    query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
+    value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
+
+    key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)
 
     dtype = query.dtype
     upcast_attention = _ATTN_PRECISION =="fp32" and query.dtype != torch.float32
@@ -137,7 +140,7 @@ def attention_sub_quad(query, key, value, heads, mask=None):
     else:
         bytes_per_token = torch.finfo(query.dtype).bits//8
     batch_x_heads, q_tokens, _ = query.shape
-    _, _, k_tokens = key_t.shape
+    _, _, k_tokens = key.shape
     qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
 
     mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)
@@ -171,7 +174,7 @@ def attention_sub_quad(query, key, value, heads, mask=None):
 
     hidden_states = efficient_dot_product_attention(
         query,
-        key_t,
+        key,
        value,
         query_chunk_size=query_chunk_size,
         kv_chunk_size=kv_chunk_size,
@@ -186,9 +189,19 @@ def attention_sub_quad(query, key, value, heads, mask=None):
     return hidden_states
 
 def attention_split(q, k, v, heads, mask=None):
-    scale = (q.shape[-1] // heads) ** -0.5
+    b, _, dim_head = q.shape
+    dim_head //= heads
+    scale = dim_head ** -0.5
+
     h = heads
-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+    q, k, v = map(
+        lambda t: t.unsqueeze(3)
+        .reshape(b, -1, heads, dim_head)
+        .permute(0, 2, 1, 3)
+        .reshape(b * heads, -1, dim_head)
+        .contiguous(),
+        (q, k, v),
+    )
 
     r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
 
@@ -248,9 +261,13 @@ def attention_split(q, k, v, heads, mask=None):
 
     del q, k, v
 
-    r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
-    del r1
-    return r2
+    r1 = (
+        r1.unsqueeze(0)
+        .reshape(b, heads, -1, dim_head)
+        .permute(0, 2, 1, 3)
+        .reshape(b, -1, heads * dim_head)
+    )
+    return r1
 
 def attention_xformers(q, k, v, heads, mask=None):
     b, _, dim_head = q.shape

From 8b65f5de54426f25cc7c08928332e5b7bf0fd25f Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 22 Oct 2023 03:59:53 -0400
Subject: [PATCH 4/4] attention_basic now works with hypertile.

---
 comfy/ldm/modules/attention.py | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index f8391e19a..dcf467489 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -95,9 +95,19 @@ def Normalize(in_channels, dtype=None, device=None):
     return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
 
 def attention_basic(q, k, v, heads, mask=None):
+    b, _, dim_head = q.shape
+    dim_head //= heads
+    scale = dim_head ** -0.5
+
     h = heads
-    scale = (q.shape[-1] // heads) ** -0.5
-    q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+    q, k, v = map(
+        lambda t: t.unsqueeze(3)
+        .reshape(b, -1, heads, dim_head)
+        .permute(0, 2, 1, 3)
+        .reshape(b * heads, -1, dim_head)
+        .contiguous(),
+        (q, k, v),
+    )
 
     # force cast to fp32 to avoid overflowing
     if _ATTN_PRECISION =="fp32":
@@ -119,7 +129,12 @@ def attention_basic(q, k, v, heads, mask=None):
     sim = sim.softmax(dim=-1)
 
     out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
-    out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+    out = (
+        out.unsqueeze(0)
+        .reshape(b, heads, -1, dim_head)
+        .permute(0, 2, 1, 3)
+        .reshape(b, -1, heads * dim_head)
+    )
     return out
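The recurring change in patches 3 and 4 is replacing the einops rearrange head split with explicit unsqueeze/reshape/permute chains that take the batch size and head dimension from the tensor shapes at call time. Below is a small standalone sketch, not part of the patch series itself, that checks the two forms produce identical results; it assumes torch and einops are installed, and the sizes (b, n, heads, dim_head) are made up purely for illustration.

import torch
from einops import rearrange

b, n, heads, dim_head = 2, 16, 8, 64
t = torch.randn(b, n, heads * dim_head)

# Head split as done by the removed einops call.
old = rearrange(t, 'b n (h d) -> (b h) n d', h=heads)

# Head split as done by the reshape/permute chain the patches introduce;
# every size is derived from the tensor itself rather than a fixed pattern.
new = (
    t.unsqueeze(3)
    .reshape(b, -1, heads, dim_head)
    .permute(0, 2, 1, 3)
    .reshape(b * heads, -1, dim_head)
)

assert torch.equal(old, new)  # both are (b * heads, n, dim_head)

# Merging heads back, mirroring the updated return path of
# attention_basic / attention_split; this inverts the split exactly.
merged = (
    new.unsqueeze(0)
    .reshape(b, heads, -1, dim_head)
    .permute(0, 2, 1, 3)
    .reshape(b, -1, heads * dim_head)
)
assert torch.equal(merged, t)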