From e638f2858a93ea3b94edc2938b213ebc1fcf4e20 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 1 Aug 2024 21:03:26 -0400
Subject: [PATCH 1/7] Hack to make all resolutions work on Flux models.

---
 comfy/ldm/flux/model.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/comfy/ldm/flux/model.py b/comfy/ldm/flux/model.py
index 21f1a877e..e7931c16d 100644
--- a/comfy/ldm/flux/model.py
+++ b/comfy/ldm/flux/model.py
@@ -124,10 +124,16 @@ class Flux(nn.Module):
 
     def forward(self, x, timestep, context, y, guidance, **kwargs):
         bs, c, h, w = x.shape
-        img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
+        patch_size = 2
+        pad_h = (patch_size - h % 2) % patch_size
+        pad_w = (patch_size - w % 2) % patch_size
 
-        h_len = (h // 2)
-        w_len = (w // 2)
+        x = torch.nn.functional.pad(x, (0, pad_w, 0, pad_h), mode='circular')
+
+        img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)
+
+        h_len = ((h + (patch_size // 2)) // patch_size)
+        w_len = ((w + (patch_size // 2)) // patch_size)
         img_ids = torch.zeros((h_len, w_len, 3), device=x.device, dtype=x.dtype)
         img_ids[..., 1] = img_ids[..., 1] + torch.linspace(0, h_len - 1, steps=h_len, device=x.device, dtype=x.dtype)[:, None]
         img_ids[..., 2] = img_ids[..., 2] + torch.linspace(0, w_len - 1, steps=w_len, device=x.device, dtype=x.dtype)[None, :]
@@ -135,4 +141,4 @@ class Flux(nn.Module):
 
         txt_ids = torch.zeros((bs, context.shape[1], 3), device=x.device, dtype=x.dtype)
         out = self.forward_orig(img, img_ids, context, txt_ids, timestep, y, guidance)
-        return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)
+        return rearrange(out, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=2, pw=2)[:,:,:h,:w]
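What the hack does, in isolation: a latent whose height or width is not a multiple of the 2x2 patch size is padded up (circularly) before patchification, h_len/w_len are computed from the rounded-up dimensions, and the output is cropped back to the requested size after unpatchification. A rough standalone sketch of that round trip, illustrative only; the helper name and the example shape below are made up and not part of the patch:

    # Sketch: pad to a multiple of the patch size, patchify, unpatchify, crop back.
    import torch
    from einops import rearrange

    def pad_patchify_roundtrip(x, patch_size=2):
        b, c, h, w = x.shape
        pad_h = (patch_size - h % patch_size) % patch_size
        pad_w = (patch_size - w % patch_size) % patch_size

        # Circular padding wraps edge rows/columns instead of inserting zeros.
        x = torch.nn.functional.pad(x, (0, pad_w, 0, pad_h), mode='circular')

        # Patchify: every 2x2 spatial block becomes one token of length c * 2 * 2.
        img = rearrange(x, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=patch_size, pw=patch_size)

        h_len = (h + pad_h) // patch_size
        w_len = (w + pad_w) // patch_size

        # Unpatchify, then crop the padding away so the output matches the input shape.
        out = rearrange(img, "b (h w) (c ph pw) -> b c (h ph) (w pw)", h=h_len, w=w_len, ph=patch_size, pw=patch_size)
        return out[:, :, :h, :w]

    x = torch.randn(1, 16, 107, 75)  # a latent resolution that is not a multiple of 2
    assert torch.allclose(pad_patchify_roundtrip(x), x)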
From ce9ac2fe0581288d4a24869dae2e04a3c2b67061 Mon Sep 17 00:00:00 2001
From: Alexander Brown
Date: Thu, 1 Aug 2024 18:40:56 -0700
Subject: [PATCH 2/7] Fix clip_g/clip_l mixup (#4168)

---
 comfy/text_encoders/flux.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py
index 849214ce0..0590741bb 100644
--- a/comfy/text_encoders/flux.py
+++ b/comfy/text_encoders/flux.py
@@ -28,7 +28,7 @@ class FluxTokenizer:
         return out
 
     def untokenize(self, token_weight_pair):
-        return self.clip_g.untokenize(token_weight_pair)
+        return self.clip_l.untokenize(token_weight_pair)
 
     def state_dict(self):
         return {}

From 369f459b2058f793c1230472f04edc9fd9471b46 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 1 Aug 2024 22:19:53 -0400
Subject: [PATCH 3/7] Fix no longer working on old pytorch.

---
 nodes.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/nodes.py b/nodes.py
index 1994f1190..e296597c5 100644
--- a/nodes.py
+++ b/nodes.py
@@ -826,9 +826,14 @@ class UNETLoader:
     CATEGORY = "advanced/loaders"
 
     def load_unet(self, unet_name, weight_dtype):
-        weight_dtype = {"default":None, "fp8_e4m3fn":torch.float8_e4m3fn, "fp8_e5m2":torch.float8_e4m3fn}[weight_dtype]
+        dtype = None
+        if weight_dtype == "fp8_e4m3fn":
+            dtype = torch.float8_e4m3fn
+        elif weight_dtype == "fp8_e5m2":
+            dtype = torch.float8_e5m2
+
         unet_path = folder_paths.get_full_path("unet", unet_name)
-        model = comfy.sd.load_unet(unet_path, dtype=weight_dtype)
+        model = comfy.sd.load_unet(unet_path, dtype=dtype)
         return (model,)
 
 class CLIPLoader:

From c1696cd1b5f572d7694dde223764861255d2398b Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Fri, 2 Aug 2024 10:34:12 -0300
Subject: [PATCH 4/7] Add missing import (#4174)

---
 comfy_extras/nodes_audio.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py
index 6f0e26365..762b48279 100644
--- a/comfy_extras/nodes_audio.py
+++ b/comfy_extras/nodes_audio.py
@@ -7,6 +7,7 @@ import io
 import json
 import struct
 import random
+import hashlib
 from comfy.cli_args import args
 
 class EmptyLatentAudio:

From eca962c6dae395cab1258456529030880c188734 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 2 Aug 2024 10:24:53 -0400
Subject: [PATCH 5/7] Add FluxGuidance node.

This lets you adjust the guidance on the dev model which is a parameter
that is passed to the diffusion model.
---
 comfy_extras/nodes_flux.py | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py
index 6c1e8b0e0..b690432b5 100644
--- a/comfy_extras/nodes_flux.py
+++ b/comfy_extras/nodes_flux.py
@@ -1,3 +1,4 @@
+import node_helpers
 
 class CLIPTextEncodeFlux:
     @classmethod
@@ -11,7 +12,7 @@ class CLIPTextEncodeFlux:
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "encode"
 
-    CATEGORY = "advanced/conditioning"
+    CATEGORY = "advanced/conditioning/flux"
 
     def encode(self, clip, clip_l, t5xxl, guidance):
         tokens = clip.tokenize(clip_l)
@@ -22,6 +23,25 @@ class CLIPTextEncodeFlux:
         output["guidance"] = guidance
         return ([[cond, output]], )
 
+class FluxGuidance:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+                    "conditioning": ("CONDITIONING", ),
+                    "guidance": ("FLOAT", {"default": 3.5, "min": 0.0, "max": 100.0, "step": 0.1}),
+                    }}
+
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "append"
+
+    CATEGORY = "advanced/conditioning/flux"
+
+    def append(self, conditioning, guidance):
+        c = node_helpers.conditioning_set_values(conditioning, {"guidance": guidance})
+        return (c, )
+
+
 NODE_CLASS_MAPPINGS = {
     "CLIPTextEncodeFlux": CLIPTextEncodeFlux,
+    "FluxGuidance": FluxGuidance,
 }
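FluxGuidance itself only tags the conditioning: it merges {"guidance": value} into the options dict of every conditioning entry, and the sampling code is then expected to read that value back out and hand it to the diffusion model's forward() (the guidance argument seen in patch 1). A sketch of the kind of helper this relies on, assuming the usual ComfyUI conditioning layout of [tensor, options] pairs; this is an illustration of the assumed behaviour, not the actual node_helpers implementation:

    # Assumed behaviour of a conditioning_set_values-style helper: copy each
    # [tensor, options] pair and merge the new keys into its options dict.
    def conditioning_set_values(conditioning, values):
        out = []
        for cond_tensor, options in conditioning:
            new_options = dict(options)
            new_options.update(values)  # e.g. {"guidance": 3.5}
            out.append([cond_tensor, new_options])
        return out

    # Hypothetical usage: tag an existing conditioning list with a guidance value.
    conditioning = [["<cond tensor>", {"pooled_output": "<pooled tensor>"}]]
    tagged = conditioning_set_values(conditioning, {"guidance": 3.5})
    assert tagged[0][1]["guidance"] == 3.5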
From bfb52de866ce659c281a6c243c8485109e4d8b8b Mon Sep 17 00:00:00 2001
From: fgdfgfthgr-fox <60460773+fgdfgfthgr-fox@users.noreply.github.com>
Date: Sat, 3 Aug 2024 02:29:03 +1200
Subject: [PATCH 6/7] Lower SAG scale step for finer control (#4158)

* Lower SAG step for finer control

Since the introduction of cfg++ which uses very low cfg value, a step of
0.1 in SAG might be too high for finer control. Even SAG of 0.1 can be
too high when cfg is only 0.6, so I change the step to 0.01.

* Lower PAG step as well.

* Update nodes_sag.py
---
 comfy_extras/nodes_pag.py | 2 +-
 comfy_extras/nodes_sag.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy_extras/nodes_pag.py b/comfy_extras/nodes_pag.py
index aec78bd8a..eb28196f4 100644
--- a/comfy_extras/nodes_pag.py
+++ b/comfy_extras/nodes_pag.py
@@ -12,7 +12,7 @@ class PerturbedAttentionGuidance:
         return {
             "required": {
                 "model": ("MODEL",),
-                "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01}),
+                "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": 0.01}),
             }
         }
 
diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py
index 010e99744..5e15b99e5 100644
--- a/comfy_extras/nodes_sag.py
+++ b/comfy_extras/nodes_sag.py
@@ -96,7 +96,7 @@ class SelfAttentionGuidance:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "model": ("MODEL",),
-                              "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.1}),
+                              "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.01}),
                              "blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}),
                              }}
     RETURN_TYPES = ("MODEL",)

From 17bbd83176268c76a8597bb3a88768d325536651 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 2 Aug 2024 13:14:28 -0400
Subject: [PATCH 7/7] Fix bug loading flac workflow when it contains = character.

---
 web/scripts/pnginfo.js | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js
index 8b1b2c61c..4477ed7a1 100644
--- a/web/scripts/pnginfo.js
+++ b/web/scripts/pnginfo.js
@@ -190,9 +190,10 @@ function parseVorbisComment(dataView) {
         const comment = getString(dataView, offset, commentLength);
         offset += commentLength;
 
-        const [key, value] = comment.split('=');
+        const ind = comment.indexOf('=')
+        const key = comment.substring(0, ind);
 
-        comments[key] = value;
+        comments[key] = comment.substring(ind+1);
     }
 
     return comments;
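Background on the fix above: flac workflows are stored as Vorbis comments, which are KEY=VALUE strings whose value can itself contain '=' (a base64-encoded workflow usually ends in '=' padding). Splitting on every '=' keeps only the text before the second '=', truncating the value, while taking the index of the first '=' and substring-ing around it keeps the whole value. The same pitfall shown in Python for illustration rather than the patched JavaScript; the tag below is made up:

    comment = "workflow=eyJub2Rlcw=="   # hypothetical tag; the base64 value ends in '=' padding

    # Buggy approach: splitting at every '=' truncates the value at the first extra '='.
    key, value = comment.split("=")[:2]
    assert key == "workflow" and value == "eyJub2Rlcw"    # trailing "==" lost

    # Fixed approach (what the indexOf/substring change does): split only at the first '='.
    key, _, value = comment.partition("=")
    assert key == "workflow" and value == "eyJub2Rlcw=="  # full value preserved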