From 04b308229ee59b5aebc0c78ea416e0b3ac22c146 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 31 May 2024 11:18:37 -0400
Subject: [PATCH 1/4] Small refactor of preview code.

---
 latent_preview.py | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/latent_preview.py b/latent_preview.py
index b258fcf20..54aa233f2 100644
--- a/latent_preview.py
+++ b/latent_preview.py
@@ -11,6 +11,13 @@ import logging
 
 MAX_PREVIEW_RESOLUTION = 512
 
+def preview_to_image(latent_image):
+    latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1)  # change scale from -1..1 to 0..1
+                        .mul(0xFF)  # to 0..255
+                    ).to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))
+
+    return Image.fromarray(latents_ubyte.numpy())
+
 class LatentPreviewer:
     def decode_latent_to_preview(self, x0):
         pass
@@ -24,12 +31,8 @@ class TAESDPreviewerImpl(LatentPreviewer):
         self.taesd = taesd
 
     def decode_latent_to_preview(self, x0):
-        x_sample = self.taesd.decode(x0[:1])[0].detach()
-        x_sample = 255. * torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0)
-        x_sample = np.moveaxis(x_sample.to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(x_sample.device)).numpy(), 0, 2)
-
-        preview_image = Image.fromarray(x_sample)
-        return preview_image
+        x_sample = self.taesd.decode(x0[:1])[0].movedim(0, 2)
+        return preview_to_image(x_sample)
 
 
 class Latent2RGBPreviewer(LatentPreviewer):
@@ -39,13 +42,7 @@ class Latent2RGBPreviewer(LatentPreviewer):
     def decode_latent_to_preview(self, x0):
         self.latent_rgb_factors = self.latent_rgb_factors.to(dtype=x0.dtype, device=x0.device)
         latent_image = x0[0].permute(1, 2, 0) @ self.latent_rgb_factors
-
-        latents_ubyte = (((latent_image + 1) / 2)
-                         .clamp(0, 1)  # change scale from -1..1 to 0..1
-                         .mul(0xFF)  # to 0..255
-                         ).to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))
-
-        return Image.fromarray(latents_ubyte.numpy())
+        return preview_to_image(latent_image)
 
 
 def get_previewer(device, latent_format):

From e2c585f3be4f8f59211b26ea28d175ea63629a78 Mon Sep 17 00:00:00 2001
From: Peter Crabtree
Date: Sat, 1 Jun 2024 12:36:08 -0400
Subject: [PATCH 2/4] Fix to allow use of PerpNegGuider with cfg_function_post hooks (like PAG) (#3618)

---
 .gitignore                    |  3 ++-
 comfy_extras/nodes_perpneg.py | 30 ++++++++++++++++++++++++++++--
 2 files changed, 30 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 9f0389241..afad81489 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,4 +15,5 @@ venv/
 !/web/extensions/logging.js.example
 !/web/extensions/core/
 /tests-ui/data/object_info.json
-/user/
\ No newline at end of file
+/user/
+comfyui*.log
diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py
index 306cf9cd0..546276aa1 100644
--- a/comfy_extras/nodes_perpneg.py
+++ b/comfy_extras/nodes_perpneg.py
@@ -61,12 +61,38 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
         self.neg_scale = neg_scale
 
     def predict_noise(self, x, timestep, model_options={}, seed=None):
+        # in CFGGuider.predict_noise, we call sampling_function(), which uses cfg_function() to compute pos & neg
+        # but we'd rather do a single batch of sampling pos, neg, and empty, so we call calc_cond_batch([pos,neg,empty]) directly
+
         positive_cond = self.conds.get("positive", None)
         negative_cond = self.conds.get("negative", None)
         empty_cond = self.conds.get("empty_negative_prompt", None)
 
-        out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, positive_cond, empty_cond], x, timestep, model_options)
-        return perp_neg(x, out[1], out[0], out[2], self.neg_scale, self.cfg)
+        (noise_pred_pos, noise_pred_neg, noise_pred_empty) = \
+            comfy.samplers.calc_cond_batch(self.inner_model, [positive_cond, negative_cond, empty_cond], x, timestep, model_options)
+        cfg_result = perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_empty, self.neg_scale, self.cfg)
+
+        # normally this would be done in cfg_function, but we skipped
+        # that for efficiency: we can compute the noise predictions in
+        # a single call to calc_cond_batch() (rather than two)
+        # so we replicate the hook here
+        for fn in model_options.get("sampler_post_cfg_function", []):
+            args = {
+                "denoised": cfg_result,
+                "cond": positive_cond,
+                "uncond": negative_cond,
+                "model": self.inner_model,
+                "uncond_denoised": noise_pred_neg,
+                "cond_denoised": noise_pred_pos,
+                "sigma": timestep,
+                "model_options": model_options,
+                "input": x,
+                # not in the original call in samplers.py:cfg_function, but made available for future hooks
+                "empty_cond": empty_cond,
+                "empty_cond_denoised": noise_pred_empty,}
+            cfg_result = fn(args)
+
+        return cfg_result
 
 class PerpNegGuider:
     @classmethod

From b249862080d4c046bd7f2680898c2f348c792a12 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 1 Jun 2024 12:47:31 -0400
Subject: [PATCH 3/4] Add an annoying print to a function I want to remove.

---
 .gitignore                | 3 +--
 comfy/model_management.py | 1 +
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index afad81489..9f0389241 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,5 +15,4 @@ venv/
 !/web/extensions/logging.js.example
 !/web/extensions/core/
 /tests-ui/data/object_info.json
-/user/
-comfyui*.log
+/user/
\ No newline at end of file
diff --git a/comfy/model_management.py b/comfy/model_management.py
index b353e50bf..3b9fad362 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -879,6 +879,7 @@ def unload_all_models():
 
 
 def resolve_lowvram_weight(weight, model, key): #TODO: remove
+    print("WARNING: The comfy.model_management.resolve_lowvram_weight function will be removed soon, please stop using it.")
     return weight
 
 #TODO: might be cleaner to put this somewhere else

From 809cc85a8e092ae416ca2652a4b73671b8d3c72b Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 2 Jun 2024 19:21:53 -0400
Subject: [PATCH 4/4] Remove useless code.

---
 comfy/model_patcher.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 2e746d8a9..84592f931 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -76,9 +76,7 @@ class ModelPatcher:
     def model_size(self):
         if self.size > 0:
             return self.size
-        model_sd = self.model.state_dict()
         self.size = comfy.model_management.module_size(self.model)
-        self.model_keys = set(model_sd.keys())
         return self.size
 
     def clone(self):
@@ -90,7 +88,6 @@ class ModelPatcher:
 
         n.object_patches = self.object_patches.copy()
         n.model_options = copy.deepcopy(self.model_options)
-        n.model_keys = self.model_keys
         n.backup = self.backup
         n.object_patches_backup = self.object_patches_backup
         return n
@@ -210,8 +207,9 @@ class ModelPatcher:
 
     def add_patches(self, patches, strength_patch=1.0, strength_model=1.0):
         p = set()
+        model_sd = self.model.state_dict()
        for k in patches:
-            if k in self.model_keys:
+            if k in model_sd:
                 p.add(k)
                 current_patches = self.patches.get(k, [])
                 current_patches.append((strength_patch, patches[k], strength_model))
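
Editor's note (sketch, not part of the patches above): PATCH 2/4 makes Guider_PerpNeg.predict_noise replay the "sampler_post_cfg_function" hooks itself, because it bypasses cfg_function by computing the positive, negative, and empty predictions in one calc_cond_batch() call. For readers unfamiliar with those hooks, a minimal illustration follows. The args keys and the model_options["sampler_post_cfg_function"] list are exactly the ones used in the hunk above; the hook name, the blending math, and the direct-append registration are assumptions for illustration only (ComfyUI nodes normally register such hooks through ModelPatcher helper methods).

def example_post_cfg_hook(args):
    # args is the dict built in predict_noise (and normally in samplers.py:cfg_function):
    #   "denoised"        - the CFG / perp-neg guided prediction
    #   "cond_denoised"   - raw prediction for the positive cond
    #   "uncond_denoised" - raw prediction for the negative cond
    #   plus "sigma", "input", "model_options", ... as listed in the patch
    denoised = args["denoised"]
    cond_denoised = args["cond_denoised"]
    # Illustrative tweak: nudge the guided result slightly toward the raw positive prediction.
    return denoised + 0.1 * (cond_denoised - denoised)

# Hooks are read from model_options["sampler_post_cfg_function"], so a hand-built
# options dict can register one by appending to that list:
model_options = {}
model_options.setdefault("sampler_post_cfg_function", []).append(example_post_cfg_hook)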