From 94e9798a4b614627805be197aa3da415a1de7ee4 Mon Sep 17 00:00:00 2001
From: omar92
Date: Thu, 20 Apr 2023 06:19:56 +0200
Subject: [PATCH 01/56] when drag from node input or output show all possible
 nodes that you can connect

---
 web/extensions/core/slotDefaults.js | 50 ++++++++++++++++++++++-------
 1 file changed, 39 insertions(+), 11 deletions(-)

diff --git a/web/extensions/core/slotDefaults.js b/web/extensions/core/slotDefaults.js
index 0b6a0a150..3ff5fdb06 100644
--- a/web/extensions/core/slotDefaults.js
+++ b/web/extensions/core/slotDefaults.js
@@ -1,21 +1,49 @@
 import { app } from "/scripts/app.js";
-
+import { ComfyWidgets } from "/scripts/widgets.js";
 // Adds defaults for quickly adding nodes with middle click on the input/output
 app.registerExtension({
     name: "Comfy.SlotDefaults",
     init() {
         LiteGraph.middle_click_slot_add_default_node = true;
-        LiteGraph.slot_types_default_in = {
-            MODEL: "CheckpointLoaderSimple",
-            LATENT: "EmptyLatentImage",
-            VAE: "VAELoader",
-        };
+    },
+    async beforeRegisterNodeDef(nodeType, nodeData, app) {
+        var nodeId = nodeData.name;
+        var inputs = [];
+        //if (nodeData["input"]["optional"] != undefined) {
+        //    inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"]);
+        //} else {
+        inputs = nodeData["input"]["required"]; //only show required inputs to reduce the mess also not logical to create node with optional inputs
+        //}
+        for (const inputKey in inputs) {
+            var input = (inputs[inputKey]);
+            //make sure input[0] is a string
+            if (typeof input[0] !== "string") continue;
+
+            // for (const slotKey in inputs[inputKey]) {
+            var type = input[0]
+            if (type in ComfyWidgets) {
+                var customProperties = input[1]
+                //console.log(customProperties)
+                if (!(customProperties?.forceInput)) continue; //ignore widgets that don't force input
+            }
+
+            if (!(type in LiteGraph.slot_types_default_out)) {
+                LiteGraph.slot_types_default_out[type] = ["Reroute"];
+            }
+            if (LiteGraph.slot_types_default_out[type].includes(nodeId)) continue;
+            LiteGraph.slot_types_default_out[type].push(nodeId);
+            // }
+        }
+
+        var outputs = nodeData["output"];
+        for (const key in outputs) {
+            var type = outputs[key];
+            if (!(type in LiteGraph.slot_types_default_in)) {
+                LiteGraph.slot_types_default_in[type] = ["Reroute"];// ["Reroute", "Primitive"]; primitive doesn't always work :'()
+            }
+            LiteGraph.slot_types_default_in[type].push(nodeId);
+        }
-        LiteGraph.slot_types_default_out = {
-            LATENT: "VAEDecode",
-            IMAGE: "SaveImage",
-            CLIP: "CLIPTextEncode",
-        };
     },
 });

From 5229c1f972b4130d5d0ddc19362604c6ec57d1fd Mon Sep 17 00:00:00 2001
From: omar92
Date: Thu, 20 Apr 2023 21:13:14 +0200
Subject: [PATCH 02/56] add option on the settings to change the number of the
 suggestions

---
 web/extensions/core/slotDefaults.js | 61 ++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 19 deletions(-)

diff --git a/web/extensions/core/slotDefaults.js b/web/extensions/core/slotDefaults.js
index 3ff5fdb06..04baadc6a 100644
--- a/web/extensions/core/slotDefaults.js
+++ b/web/extensions/core/slotDefaults.js
@@ -4,46 +4,69 @@ import { ComfyWidgets } from "/scripts/widgets.js";
 app.registerExtension({
     name: "Comfy.SlotDefaults",
+    suggestionsNumber: null,
     init() {
         LiteGraph.middle_click_slot_add_default_node = true;
+        this.suggestionsNumber = app.ui.settings.addSetting({
+            id: "Comfy.NodeSuggestions.number",
+            name: "number of nodes suggestions",
+            type: "slider",
+            attrs: {
+                min: 1,
+                max: 100,
+                step: 1,
+            },
+            defaultValue: 5,
+            onChange: (newVal, oldVal) => {
+                this.setDefaults(newVal);
+            }
+        });
     },
+    slot_types_default_out: {},
+    slot_types_default_in: {},
     async beforeRegisterNodeDef(nodeType, nodeData, app) {
-        var nodeId = nodeData.name; 
+        var nodeId = nodeData.name;
         var inputs = [];
         inputs = nodeData["input"]["required"]; //only show required inputs to reduce the mess also not logical to create node with optional inputs
         for (const inputKey in inputs) {
             var input = (inputs[inputKey]);
             if (typeof input[0] !== "string") continue;
 
             var type = input[0]
             if (type in ComfyWidgets) {
                 var customProperties = input[1]
                 if (!(customProperties?.forceInput)) continue; //ignore widgets that don't force input
             }
 
-            if (!(type in LiteGraph.slot_types_default_out)) {
-                LiteGraph.slot_types_default_out[type] = ["Reroute"];
+            if (!(type in this.slot_types_default_out)) {
+                this.slot_types_default_out[type] = ["Reroute"];
             }
-            if (LiteGraph.slot_types_default_out[type].includes(nodeId)) continue;
-            LiteGraph.slot_types_default_out[type].push(nodeId);
-        }
+            if (this.slot_types_default_out[type].includes(nodeId)) continue;
+            this.slot_types_default_out[type].push(nodeId);
+        }
 
         var outputs = nodeData["output"];
         for (const key in outputs) {
             var type = outputs[key];
-            if (!(type in LiteGraph.slot_types_default_in)) {
-                LiteGraph.slot_types_default_in[type] = ["Reroute"];// ["Reroute", "Primitive"]; primitive doesn't always work :'()
+            if (!(type in this.slot_types_default_in)) {
+                this.slot_types_default_in[type] = ["Reroute"];// ["Reroute", "Primitive"]; primitive doesn't always work :'()
             }
-            LiteGraph.slot_types_default_in[type].push(nodeId);
-        }
+            this.slot_types_default_in[type].push(nodeId);
+        }
+        var maxNum = this.suggestionsNumber ? this.suggestionsNumber.value : 5;
+        this.setDefaults(maxNum);
     },
+    setDefaults(maxNum) {
+
+        LiteGraph.slot_types_default_out = {};
+        LiteGraph.slot_types_default_in = {};
+
+        for (const type in this.slot_types_default_out) {
+            LiteGraph.slot_types_default_out[type] = this.slot_types_default_out[type].slice(0, maxNum);
+        }
+        for (const type in this.slot_types_default_in) {
+            LiteGraph.slot_types_default_in[type] = this.slot_types_default_in[type].slice(0, maxNum);
+        }
+    }
 });

From 31e60adb2802874a5889623a83149faa32924a98 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 20 Apr 2023 17:30:10 -0400
Subject: [PATCH 03/56] Add GLIGEN example to README.

---
 README.md | 1 +
 nodes.py  | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index be2cb8ec5..bf16006bf 100644
--- a/README.md
+++ b/README.md
@@ -25,6 +25,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
 - [ControlNet and T2I-Adapter](https://comfyanonymous.github.io/ComfyUI_examples/controlnet/)
 - [Upscale Models (ESRGAN, ESRGAN variants, SwinIR, Swin2SR, etc...)](https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/)
 - [unCLIP Models](https://comfyanonymous.github.io/ComfyUI_examples/unclip/)
+- [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/)
 - Starts up very fast.
 - Works fully offline: will never download anything.
 - [Config file](extra_model_paths.yaml.example) to set the search paths for models.

diff --git a/nodes.py b/nodes.py
index 8555f272a..48c3ee9c3 100644
--- a/nodes.py
+++ b/nodes.py
@@ -498,7 +498,7 @@ class GLIGENLoader:
     RETURN_TYPES = ("GLIGEN",)
     FUNCTION = "load_gligen"
 
-    CATEGORY = "_for_testing/gligen"
+    CATEGORY = "loaders"
 
     def load_gligen(self, gligen_name):
         gligen_path = folder_paths.get_full_path("gligen", gligen_name)
@@ -520,7 +520,7 @@ class GLIGENTextBoxApply:
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "append"
 
-    CATEGORY = "_for_testing/gligen"
+    CATEGORY = "conditioning/gligen"
 
     def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
         c = []

From d2ef3465ca838e528008cb5e20b40d25079d5176 Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Thu, 20 Apr 2023 18:23:51 -0600
Subject: [PATCH 04/56] Improve current word selection

---
 web/extensions/core/editAttention.js | 25 +++++++++----------------
 1 file changed, 9 insertions(+), 16 deletions(-)

diff --git a/web/extensions/core/editAttention.js b/web/extensions/core/editAttention.js
index bebc80b12..cc51a04e5 100644
--- a/web/extensions/core/editAttention.js
+++ b/web/extensions/core/editAttention.js
@@ -89,24 +89,17 @@ app.registerExtension({
                 end = nearestEnclosure.end;
                 selectedText = inputField.value.substring(start, end);
             } else {
-                // Select the current word, find the start and end of the word (first space before and after)
-                const wordStart = inputField.value.substring(0, start).lastIndexOf(" ") + 1;
-                const wordEnd = inputField.value.substring(end).indexOf(" ");
-                // If there is no space after the word, select to the end of the string
-                if (wordEnd === -1) {
-                    end = inputField.value.length;
-                } else {
-                    end += wordEnd;
+                // Select the current word, find the start and end of the word
+                const delimiters = " .,\\/!?%^*;:{}=-_`~()\r\n\t";
+
+                while (!delimiters.includes(inputField.value[start - 1]) && start > 0) {
+                    start--;
+                }
+
+                while (!delimiters.includes(inputField.value[end]) && end < inputField.value.length) {
+                    end++;
                 }
-                start = wordStart;
 
-                // Remove all punctuation at the end and beginning of the word
-                while (inputField.value[start].match(/[.,\/#!$%\^&\*;:{}=\-_`~()]/)) {
-                    start++;
-                }
-                while (inputField.value[end - 1].match(/[.,\/#!$%\^&\*;:{}=\-_`~()]/)) {
-                    end--;
-                }
                 selectedText = inputField.value.substring(start, end);
                 if (!selectedText) return;
             }

From 907010e0824eeab12c5948e5afa4df6d0934be9a Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 20 Apr 2023 23:58:25 -0400
Subject: [PATCH 05/56] Remove some useless code.
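The CFGDenoiser class removed below duplicated classifier-free guidance logic
that sampling_function already provides. As a reference, a minimal sketch of
the guidance mix it implemented (function and tensor names here are
illustrative, not part of the codebase):

    import torch

    def cfg_mix(cond_pred: torch.Tensor, uncond_pred: torch.Tensor, cond_scale: float) -> torch.Tensor:
        # Classifier-free guidance: start from the unconditional prediction
        # and push it toward the conditional one, scaled by cond_scale.
        return uncond_pred + (cond_pred - uncond_pred) * cond_scale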
---
 comfy/samplers.py | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index 19ebc97d9..15527224e 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -7,23 +7,6 @@ from comfy import model_management
 from .ldm.models.diffusion.ddim import DDIMSampler
 from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
 
-class CFGDenoiser(torch.nn.Module):
-    def __init__(self, model):
-        super().__init__()
-        self.inner_model = model
-
-    def forward(self, x, sigma, uncond, cond, cond_scale):
-        if len(uncond[0]) == len(cond[0]) and x.shape[0] * x.shape[2] * x.shape[3] < (96 * 96): #TODO check memory instead
-            x_in = torch.cat([x] * 2)
-            sigma_in = torch.cat([sigma] * 2)
-            cond_in = torch.cat([uncond, cond])
-            uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
-        else:
-            cond = self.inner_model(x, sigma, cond=cond)
-            uncond = self.inner_model(x, sigma, cond=uncond)
-        return uncond + (cond - uncond) * cond_scale
-
-
 #The main sampling function shared by all the samplers
 #Returns predicted noise
 def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}):

From 98ae4bbfdee1ea9da62e3d22a3c6428032a78398 Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Thu, 20 Apr 2023 23:55:20 -0600
Subject: [PATCH 06/56] Remove brackets if weight == 1

---
 web/extensions/core/editAttention.js | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/web/extensions/core/editAttention.js b/web/extensions/core/editAttention.js
index cc51a04e5..b937bb103 100644
--- a/web/extensions/core/editAttention.js
+++ b/web/extensions/core/editAttention.js
@@ -128,8 +128,13 @@ app.registerExtension({
 
                 // Increment the weight
                 const weightDelta = event.key === "ArrowUp" ? delta : -delta;
-                const updatedText = selectedText.replace(/(.*:)(\d+(\.\d+)?)(.*)/, (match, prefix, weight, _, suffix) => {
-                    return prefix + incrementWeight(weight, weightDelta) + suffix;
+                const updatedText = selectedText.replace(/\((.*):(\d+(?:\.\d+)?)\)/, (match, text, weight) => {
+                    weight = incrementWeight(weight, weightDelta);
+                    if (weight == 1) {
+                        return text;
+                    } else {
+                        return `(${text}:${weight})`;
+                    }
                 });
 
                 inputField.setRangeText(updatedText, start, end, "select");

From 989acd769a6b5f3a5d6e3cd03fafbd9668c2dbdf Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 21 Apr 2023 23:43:38 -0400
Subject: [PATCH 07/56] Cleanup.

---
 web/extensions/core/slotDefaults.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web/extensions/core/slotDefaults.js b/web/extensions/core/slotDefaults.js
index 04baadc6a..3ec605900 100644
--- a/web/extensions/core/slotDefaults.js
+++ b/web/extensions/core/slotDefaults.js
@@ -54,7 +54,7 @@ app.registerExtension({
             this.slot_types_default_in[type].push(nodeId);
         }
 
-        var maxNum = this.suggestionsNumber ? this.suggestionsNumber.value : 5;
+        var maxNum = this.suggestionsNumber.value;
         this.setDefaults(maxNum);
     },
     setDefaults(maxNum) {

From 6908f9c94992b32fbb96be0f6cd8c5b362d72a77 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 22 Apr 2023 14:30:39 -0400
Subject: [PATCH 08/56] This makes pytorch2.0 attention perform a bit faster.
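The diff below replaces a chain of unsqueeze/reshape/permute/contiguous calls
with a single view plus transpose, keeping the tensors 4-D in the
(batch, heads, tokens, dim_head) layout that
torch.nn.functional.scaled_dot_product_attention consumes directly. A small
sketch (shapes chosen arbitrarily for illustration) showing the two layouts
hold the same data:

    import torch

    b, n, heads, dim_head = 2, 16, 8, 64
    t = torch.randn(b, n, heads * dim_head)

    # Old path: several reshape steps ending in a flattened (b*heads, n, dim_head) tensor.
    slow = (t.unsqueeze(3)
             .reshape(b, n, heads, dim_head)
             .permute(0, 2, 1, 3)
             .reshape(b * heads, n, dim_head)
             .contiguous())

    # New path: one view and one transpose, left as (b, heads, n, dim_head).
    fast = t.view(b, -1, heads, dim_head).transpose(1, 2)

    assert torch.equal(slow, fast.reshape(b * heads, n, dim_head))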
---
 comfy/ldm/modules/attention.py | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 98dbda635..c27d032a3 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -455,11 +455,7 @@ class CrossAttentionPytorch(nn.Module):
         b, _, _ = q.shape
         q, k, v = map(
-            lambda t: t.unsqueeze(3)
-            .reshape(b, t.shape[1], self.heads, self.dim_head)
-            .permute(0, 2, 1, 3)
-            .reshape(b * self.heads, t.shape[1], self.dim_head)
-            .contiguous(),
+            lambda t: t.view(b, -1, self.heads, self.dim_head).transpose(1, 2),
             (q, k, v),
         )
 
@@ -468,10 +464,7 @@ class CrossAttentionPytorch(nn.Module):
         if exists(mask):
             raise NotImplementedError
         out = (
-            out.unsqueeze(0)
-            .reshape(b, self.heads, out.shape[1], self.dim_head)
-            .permute(0, 2, 1, 3)
-            .reshape(b, out.shape[1], self.heads * self.dim_head)
+            out.transpose(1, 2).reshape(b, -1, self.heads * self.dim_head)
         )
 
         return self.to_out(out)

From ee030d281bbd25d385ba9ca10badb66b487cca21 Mon Sep 17 00:00:00 2001
From: Jacob Segal
Date: Sat, 22 Apr 2023 16:02:26 -0700
Subject: [PATCH 09/56] Add support for multiple unique inpainting masks

This enables workflows like "Inpaint at full resolution" when using batch
sizes greater than 1.
---
 nodes.py | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/nodes.py b/nodes.py
index 48c3ee9c3..9335d5243 100644
--- a/nodes.py
+++ b/nodes.py
@@ -171,24 +171,28 @@ class VAEEncodeForInpaint:
     def encode(self, vae, pixels, mask):
         x = (pixels.shape[1] // 64) * 64
         y = (pixels.shape[2] // 64) * 64
-        mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]
+        if len(mask.shape) < 3:
+            mask = mask.unsqueeze(0).unsqueeze(0)
+        elif len(mask.shape) < 4:
+            mask = mask.unsqueeze(1)
+        mask = torch.nn.functional.interpolate(mask, size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")
 
         pixels = pixels.clone()
         if pixels.shape[1] != x or pixels.shape[2] != y:
             pixels = pixels[:,:x,:y,:]
-            mask = mask[:x,:y]
+            mask = mask[:,:x,:y,:]
 
         #grow mask by a few pixels to keep things seamless in latent space
         kernel_tensor = torch.ones((1, 1, 6, 6))
-        mask_erosion = torch.clamp(torch.nn.functional.conv2d((mask.round())[None], kernel_tensor, padding=3), 0, 1)
-        m = (1.0 - mask.round())
+        mask_erosion = torch.clamp(torch.nn.functional.conv2d(mask.round(), kernel_tensor, padding=3), 0, 1)
+        m = (1.0 - mask.round()).squeeze(1)
         for i in range(3):
             pixels[:,:,:,i] -= 0.5
             pixels[:,:,:,i] *= m
             pixels[:,:,:,i] += 0.5
         t = vae.encode(pixels)
 
-        return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )
+        return ({"samples":t, "noise_mask": (mask_erosion[:,:x,:y,:].round())}, )
 
 class CheckpointLoader:
     @classmethod
@@ -759,10 +763,15 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
 
     if "noise_mask" in latent:
         noise_mask = latent['noise_mask']
-        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
+        if len(noise_mask.shape) < 3:
+            noise_mask = noise_mask.unsqueeze(0).unsqueeze(0)
+        elif len(noise_mask.shape) < 4:
+            noise_mask = noise_mask.unsqueeze(1)
+        noise_mask = torch.nn.functional.interpolate(noise_mask, size=(noise.shape[2], noise.shape[3]), mode="bilinear")
         noise_mask = noise_mask.round()
         noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
-        noise_mask = torch.cat([noise_mask] * noise.shape[0])
+        if noise_mask.shape[0] < latent_image.shape[0]:
+            noise_mask = noise_mask.repeat(latent_image.shape[0] // noise_mask.shape[0], 1, 1, 1)
         noise_mask = noise_mask.to(device)
 
     real_model = None

From c8355ed39ff39a10eb7a3d262f278dc99ad2e73b Mon Sep 17 00:00:00 2001
From: pythongosssss <125205205+pythongosssss@users.noreply.github.com>
Date: Sun, 23 Apr 2023 10:31:21 +0100
Subject: [PATCH 10/56] use window.name instead of session storage - prevents
 duplicate stealing session id

---
 web/scripts/api.js | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/web/scripts/api.js b/web/scripts/api.js
index 2b90c2abc..d29faa5ba 100644
--- a/web/scripts/api.js
+++ b/web/scripts/api.js
@@ -35,7 +35,7 @@ class ComfyApi extends EventTarget {
         }
 
         let opened = false;
-        let existingSession = sessionStorage["Comfy.SessionId"] || "";
+        let existingSession = window.name;
         if (existingSession) {
             existingSession = "?clientId=" + existingSession;
         }
@@ -75,7 +75,7 @@ class ComfyApi extends EventTarget {
                     case "status":
                         if (msg.data.sid) {
                             this.clientId = msg.data.sid;
-                            sessionStorage["Comfy.SessionId"] = this.clientId;
+                            window.name = this.clientId;
                         }
                         this.dispatchEvent(new CustomEvent("status", { detail: msg.data.status }));
                         break;

From 5282f5643476ba0f55197c3ca8b72ce43525b025 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 23 Apr 2023 12:35:25 -0400
Subject: [PATCH 11/56] Implement Linear hypernetworks.

Add a HypernetworkLoader node to use hypernetworks.
---
 comfy/ldm/modules/attention.py              | 69 +++++++++++++---
 comfy/model_management.py                   |  3 +
 comfy/samplers.py                           | 10 ++-
 comfy/sd.py                                 | 23 ++++++
 comfy/utils.py                              |  7 +-
 comfy_extras/nodes_hypernetwork.py          | 87 +++++++++++++++++++++
 folder_paths.py                             |  1 +
 models/hypernetworks/put_hypernetworks_here |  0
 nodes.py                                    |  1 +
 9 files changed, 185 insertions(+), 16 deletions(-)
 create mode 100644 comfy_extras/nodes_hypernetwork.py
 create mode 100644 models/hypernetworks/put_hypernetworks_here

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index c27d032a3..ce7180d91 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -163,13 +163,17 @@ class CrossAttentionBirchSan(nn.Module):
             nn.Dropout(dropout)
         )
 
-    def forward(self, x, context=None, mask=None):
+    def forward(self, x, context=None, value=None, mask=None):
         h = self.heads
 
         query = self.to_q(x)
         context = default(context, x)
         key = self.to_k(context)
-        value = self.to_v(context)
+        if value is not None:
+            value = self.to_v(value)
+        else:
+            value = self.to_v(context)
+
         del context, x
 
         query = query.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)
@@ -256,13 +260,17 @@ class CrossAttentionDoggettx(nn.Module):
             nn.Dropout(dropout)
         )
 
-    def forward(self, x, context=None, mask=None):
+    def forward(self, x, context=None, value=None, mask=None):
         h = self.heads
 
         q_in = self.to_q(x)
         context = default(context, x)
         k_in = self.to_k(context)
-        v_in = self.to_v(context)
+        if value is not None:
+            v_in = self.to_v(value)
+            del value
+        else:
+            v_in = self.to_v(context)
         del context, x
 
         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
@@ -350,13 +358,17 @@ class CrossAttention(nn.Module):
             nn.Dropout(dropout)
         )
 
-    def forward(self, x, context=None, mask=None):
+    def forward(self, x, context=None, value=None, mask=None):
         h = self.heads
 
         q = self.to_q(x)
         context = default(context, x)
         k = self.to_k(context)
-        v = self.to_v(context)
+        if value is not None:
+            v = self.to_v(value)
+            del value
+        else:
+            v = self.to_v(context)
 
         q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
@@ -402,11 +414,15 @@ class MemoryEfficientCrossAttention(nn.Module):
         self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
         self.attention_op: Optional[Any] = None
 
-    def forward(self, x, context=None, mask=None):
+    def forward(self, x, context=None, value=None, mask=None):
         q = self.to_q(x)
         context = default(context, x)
         k = self.to_k(context)
-        v = self.to_v(context)
+        if value is not None:
+            v = self.to_v(value)
+            del value
+        else:
+            v = self.to_v(context)
 
         b, _, _ = q.shape
         q, k, v = map(
@@ -447,11 +463,15 @@ class CrossAttentionPytorch(nn.Module):
         self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))
         self.attention_op: Optional[Any] = None
 
-    def forward(self, x, context=None, mask=None):
+    def forward(self, x, context=None, value=None, mask=None):
         q = self.to_q(x)
         context = default(context, x)
         k = self.to_k(context)
-        v = self.to_v(context)
+        if value is not None:
+            v = self.to_v(value)
+            del value
+        else:
+            v = self.to_v(context)
 
         b, _, _ = q.shape
         q, k, v = map(
@@ -512,11 +532,25 @@ class BasicTransformerBlock(nn.Module):
             transformer_patches = {}
 
         n = self.norm1(x)
+        if self.disable_self_attn:
+            context_attn1 = context
+        else:
+            context_attn1 = None
+        value_attn1 = None
+
+        if "attn1_patch" in transformer_patches:
+            patch = transformer_patches["attn1_patch"]
+            if context_attn1 is None:
+                context_attn1 = n
+            value_attn1 = context_attn1
+            for p in patch:
+                n, context_attn1, value_attn1 = p(current_index, n, context_attn1, value_attn1)
+
         if "tomesd" in transformer_options:
             m, u = tomesd.get_functions(x, transformer_options["tomesd"]["ratio"], transformer_options["original_shape"])
-            n = u(self.attn1(m(n), context=context if self.disable_self_attn else None))
+            n = u(self.attn1(m(n), context=context_attn1, value=value_attn1))
         else:
-            n = self.attn1(n, context=context if self.disable_self_attn else None)
+            n = self.attn1(n, context=context_attn1, value=value_attn1)
 
         x += n
         if "middle_patch" in transformer_patches:
             patch = transformer_patches["middle_patch"]
             for p in patch:
                 x = p(current_index, x)
 
         n = self.norm2(x)
-        n = self.attn2(n, context=context)
+
+        context_attn2 = context
+        value_attn2 = None
+        if "attn2_patch" in transformer_patches:
+            patch = transformer_patches["attn2_patch"]
+            value_attn2 = context_attn2
+            for p in patch:
+                n, context_attn2, value_attn2 = p(current_index, n, context_attn2, value_attn2)
+
+        n = self.attn2(n, context=context_attn2, value=value_attn2)
 
         x += n
         x = self.ff(self.norm3(x)) + x

diff --git a/comfy/model_management.py b/comfy/model_management.py
index a0d1313d2..6e3a03530 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -133,6 +133,7 @@ def unload_model():
     #never unload models from GPU on high vram
     if vram_state != VRAMState.HIGH_VRAM:
         current_loaded_model.model.cpu()
+        current_loaded_model.model_patches_to("cpu")
     current_loaded_model.unpatch_model()
     current_loaded_model = None
@@ -156,6 +157,8 @@ def load_model_gpu(model):
     except Exception as e:
         model.unpatch_model()
         raise e
+
+    model.model_patches_to(get_torch_device())
+
     current_loaded_model = model
     if vram_state == VRAMState.CPU:
         pass

diff --git a/comfy/samplers.py b/comfy/samplers.py
index 15527224e..b860f25f1 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -197,7 +197,15 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
             transformer_options = model_options['transformer_options'].copy()
 
             if patches is not None:
-                transformer_options["patches"] = patches
+                if "patches" in transformer_options:
+                    cur_patches = transformer_options["patches"].copy()
+                    for p in patches:
+                        if p in cur_patches:
+                            cur_patches[p] = cur_patches[p] + patches[p]
+                        else:
+                            cur_patches[p] = patches[p]
+                else:
+                    transformer_options["patches"] = patches
 
             c['transformer_options'] = transformer_options

diff --git a/comfy/sd.py b/comfy/sd.py
index 211acd70e..92dbb931d 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -254,6 +254,29 @@ class ModelPatcher:
     def set_model_sampler_cfg_function(self, sampler_cfg_function):
         self.model_options["sampler_cfg_function"] = sampler_cfg_function
 
+    def set_model_patch(self, patch, name):
+        to = self.model_options["transformer_options"]
+        if "patches" not in to:
+            to["patches"] = {}
+        to["patches"][name] = to["patches"].get(name, []) + [patch]
+
+    def set_model_attn1_patch(self, patch):
+        self.set_model_patch(patch, "attn1_patch")
+
+    def set_model_attn2_patch(self, patch):
+        self.set_model_patch(patch, "attn2_patch")
+
+    def model_patches_to(self, device):
+        to = self.model_options["transformer_options"]
+        if "patches" in to:
+            patches = to["patches"]
+            for name in patches:
+                patch_list = patches[name]
+                for i in range(len(patch_list)):
+                    if hasattr(patch_list[i], "to"):
+                        patch_list[i] = patch_list[i].to(device)
+
     def model_dtype(self):
         return self.model.diffusion_model.dtype

diff --git a/comfy/utils.py b/comfy/utils.py
index 0380b91dd..68f93403c 100644
--- a/comfy/utils.py
+++ b/comfy/utils.py
@@ -1,11 +1,14 @@
 import torch
 
-def load_torch_file(ckpt):
+def load_torch_file(ckpt, safe_load=False):
     if ckpt.lower().endswith(".safetensors"):
         import safetensors.torch
         sd = safetensors.torch.load_file(ckpt, device="cpu")
     else:
-        pl_sd = torch.load(ckpt, map_location="cpu")
+        if safe_load:
+            pl_sd = torch.load(ckpt, map_location="cpu", weights_only=True)
+        else:
+            pl_sd = torch.load(ckpt, map_location="cpu")
         if "global_step" in pl_sd:
             print(f"Global Step: {pl_sd['global_step']}")
         if "state_dict" in pl_sd:

diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py
new file mode 100644
index 000000000..db2f8695c
--- /dev/null
+++ b/comfy_extras/nodes_hypernetwork.py
@@ -0,0 +1,87 @@
+import comfy.utils
+import folder_paths
+import torch
+
+def load_hypernetwork_patch(path, strength):
+    sd = comfy.utils.load_torch_file(path, safe_load=True)
+    activation_func = sd.get('activation_func', 'linear')
+    is_layer_norm = sd.get('is_layer_norm', False)
+    use_dropout = sd.get('use_dropout', False)
+    activate_output = sd.get('activate_output', False)
+    last_layer_dropout = sd.get('last_layer_dropout', False)
+
+    if activation_func != 'linear' or is_layer_norm != False or use_dropout != False or activate_output != False or last_layer_dropout != False:
+        print("Unsupported Hypernetwork format, if you report it I might implement it.", path, " ", activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)
+        return None
+
+    out = {}
+
+    for d in sd:
+        try:
+            dim = int(d)
+        except:
+            continue
+
+        output = []
+        for index in [0, 1]:
+            attn_weights = sd[dim][index]
+            keys = attn_weights.keys()
+
+            linears = filter(lambda a: a.endswith(".weight"), keys)
+            linears = sorted(list(map(lambda a: a[:-len(".weight")], linears)))
+            layers = []
+
+            for lin_name in linears:
+                lin_weight = attn_weights['{}.weight'.format(lin_name)]
+                lin_bias = attn_weights['{}.bias'.format(lin_name)]
+                layer = torch.nn.Linear(lin_weight.shape[1], lin_weight.shape[0])
+                layer.load_state_dict({"weight": lin_weight, "bias": lin_bias})
+                layers += [layer]
+
+            output.append(torch.nn.Sequential(*layers))
+        out[dim] = torch.nn.ModuleList(output)
+
+    class hypernetwork_patch:
+        def __init__(self, hypernet, strength):
+            self.hypernet = hypernet
+            self.strength = strength
+        def __call__(self, current_index, q, k, v):
+            dim = k.shape[-1]
+            if dim in self.hypernet:
+                hn = self.hypernet[dim]
+                k = k + hn[0](k) * self.strength
+                v = v + hn[1](v) * self.strength
+
+            return q, k, v
+
+        def to(self, device):
+            for d in self.hypernet.keys():
+                self.hypernet[d] = self.hypernet[d].to(device)
+            return self
+
+    return hypernetwork_patch(out, strength)
+
+class HypernetworkLoader:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model": ("MODEL",),
+                              "hypernetwork_name": (folder_paths.get_filename_list("hypernetworks"), ),
+                              "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+                              }}
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "load_hypernetwork"
+
+    CATEGORY = "_for_testing"
+
+    def load_hypernetwork(self, model, hypernetwork_name, strength):
+        hypernetwork_path = folder_paths.get_full_path("hypernetworks", hypernetwork_name)
+        model_hypernetwork = model.clone()
+        patch = load_hypernetwork_patch(hypernetwork_path, strength)
+        if patch is not None:
+            model_hypernetwork.set_model_attn1_patch(patch)
+            model_hypernetwork.set_model_attn2_patch(patch)
+        return (model_hypernetwork,)
+
+NODE_CLASS_MAPPINGS = {
+    "HypernetworkLoader": HypernetworkLoader
+}

diff --git a/folder_paths.py b/folder_paths.py
index 3c4ad3711..bb0d65524 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -32,6 +32,7 @@
 folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes")], [])
 
+folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions)
 
 output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
 temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")

diff --git a/models/hypernetworks/put_hypernetworks_here b/models/hypernetworks/put_hypernetworks_here
new file mode 100644
index 000000000..e69de29bb

diff --git a/nodes.py b/nodes.py
index 48c3ee9c3..6ca73fa0c 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1268,6 +1268,7 @@ def load_custom_nodes():
 
 def init_custom_nodes():
     load_custom_nodes()
+    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py"))
     load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
     load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
     load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))

From 2a09e2aa27620c492f694b66cc10c5f41b101c12 Mon Sep 17 00:00:00 2001
From: BlenderNeko <126974546+BlenderNeko@users.noreply.github.com>
Date: Sun, 23 Apr 2023 20:02:08 +0200
Subject: [PATCH 12/56] refactor/split various bits of code for sampling

---
 comfy/sample.py   | 62 +++++++++++++++++++++++++++++++++++++++
 comfy/samplers.py | 64 +++++++++++++++++++++++++++--------------------
 nodes.py          | 60 +++++++------------------------------------
 3 files changed, 108 insertions(+), 78 deletions(-)
 create mode 100644 comfy/sample.py

diff --git a/comfy/sample.py b/comfy/sample.py
new file mode 100644
index 000000000..ede89890b
--- /dev/null
+++ b/comfy/sample.py
@@ -0,0 +1,62 @@
+import torch
+import comfy.model_management
+
+
+def prepare_noise(latent, seed, disable_noise):
+    latent_image = latent["samples"]
+    if disable_noise:
+        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+    else:
+        batch_index = 0
+        if "batch_index" in latent:
+            batch_index = latent["batch_index"]
+
+        generator = torch.manual_seed(seed)
+        for i in range(batch_index):
+            noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
+        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
+    return noise
+
+def create_mask(latent, noise):
+    noise_mask = None
+    device = comfy.model_management.get_torch_device()
+    if "noise_mask" in latent:
+        noise_mask = latent['noise_mask']
+        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
+        noise_mask = noise_mask.round()
+        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
+        noise_mask = torch.cat([noise_mask] * noise.shape[0])
+        noise_mask = noise_mask.to(device)
+    return noise_mask
+
+def broadcast_cond(cond, noise):
+    device = comfy.model_management.get_torch_device()
+    copy = []
+    for p in cond:
+        t = p[0]
+        if t.shape[0] < noise.shape[0]:
+            t = torch.cat([t] * noise.shape[0])
+        t = t.to(device)
+        copy += [[t] + p[1:]]
+    return copy
+
+def load_c_nets(positive, negative):
+    def get_models(cond):
+        models = []
+        for c in cond:
+            if 'control' in c[1]:
+                models += [c[1]['control']]
+            if 'gligen' in c[1]:
+                models += [c[1]['gligen'][1]]
+        return models
+
+    return get_models(positive) + get_models(negative)
+
+def load_additional_models(positive, negative):
+    models = load_c_nets(positive, negative)
+    comfy.model_management.load_controlnet_gpu(models)
+    return models
+
+def cleanup_additional_models(models):
+    for m in models:
+        m.cleanup()
\ No newline at end of file
diff --git a/comfy/samplers.py b/comfy/samplers.py
index 15527224e..541a8db8d 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -392,6 +392,38 @@ def encode_adm(noise_augmentor, conds, batch_size, device):
 
     return conds
 
+def calculate_sigmas(model, steps, scheduler, sampler):
+    """
+    Returns a tensor containing the sigmas corresponding to the given model, number of steps, scheduler type and sample technique
+    """
+    if not (isinstance(model, CompVisVDenoiser) or isinstance(model, k_diffusion_external.CompVisDenoiser)):
+        model = CFGNoisePredictor(model)
+        if model.inner_model.parameterization == "v":
+            model = CompVisVDenoiser(model, quantize=True)
+        else:
+            model = k_diffusion_external.CompVisDenoiser(model, quantize=True)
+
+    sigmas = None
+
+    discard_penultimate_sigma = False
+    if sampler in ['dpm_2', 'dpm_2_ancestral']:
+        steps += 1
+        discard_penultimate_sigma = True
+
+    if scheduler == "karras":
+        sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model.sigma_min), sigma_max=float(model.sigma_max))
+    elif scheduler == "normal":
+        sigmas = model.get_sigmas(steps)
+    elif scheduler == "simple":
+        sigmas = simple_scheduler(model, steps)
+    elif scheduler == "ddim_uniform":
+        sigmas = ddim_scheduler(model, steps)
+    else:
+        print("error invalid scheduler", scheduler)
+
+    if discard_penultimate_sigma:
+        sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
+    return sigmas
 
 class KSampler:
     SCHEDULERS = ["karras", "normal", "simple", "ddim_uniform"]
@@ -421,41 +453,19 @@ class KSampler:
         self.denoise = denoise
         self.model_options = model_options
 
-    def _calculate_sigmas(self, steps):
-        sigmas = None
-
-        discard_penultimate_sigma = False
-        if self.sampler in ['dpm_2', 'dpm_2_ancestral']:
-            steps += 1
-            discard_penultimate_sigma = True
-
-        if self.scheduler == "karras":
-            sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max, device=self.device)
-        elif self.scheduler == "normal":
-            sigmas = self.model_wrap.get_sigmas(steps).to(self.device)
-        elif self.scheduler == "simple":
-            sigmas = simple_scheduler(self.model_wrap, steps).to(self.device)
-        elif self.scheduler == "ddim_uniform":
-            sigmas = ddim_scheduler(self.model_wrap, steps).to(self.device)
-        else:
-            print("error invalid scheduler", self.scheduler)
-
-        if discard_penultimate_sigma:
-            sigmas = torch.cat([sigmas[:-2], sigmas[-1:]])
-        return sigmas
-
     def set_steps(self, steps, denoise=None):
         self.steps = steps
         if denoise is None or denoise > 0.9999:
-            self.sigmas = self._calculate_sigmas(steps)
+            self.sigmas = calculate_sigmas(self.model_wrap, steps, self.scheduler, self.sampler).to(self.device)
         else:
             new_steps = int(steps/denoise)
-            sigmas = self._calculate_sigmas(new_steps)
+            sigmas = calculate_sigmas(self.model_wrap, new_steps, self.scheduler, self.sampler).to(self.device)
             self.sigmas = sigmas[-(steps + 1):]
 
-    def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None):
-        sigmas = self.sigmas
+    def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None):
+        if sigmas is None:
+            sigmas = self.sigmas
         sigma_min = self.sigma_min
 
         if last_step is not None and last_step < (len(sigmas) - 1):
diff --git a/nodes.py b/nodes.py
index 48c3ee9c3..601661864 100644
--- a/nodes.py
+++ b/nodes.py
@@ -16,6 +16,7 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "co
 
 import comfy.diffusers_convert
 import comfy.samplers
+import comfy.sample
 import comfy.sd
 import comfy.utils
 
@@ -739,31 +740,12 @@ class SetLatentNoiseMask:
         s["noise_mask"] = mask
         return (s,)
 
-
 def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
-    latent_image = latent["samples"]
-    noise_mask = None
     device = comfy.model_management.get_torch_device()
+    latent_image = latent["samples"]
 
-    if disable_noise:
-        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
-    else:
-        batch_index = 0
-        if "batch_index" in latent:
-            batch_index = latent["batch_index"]
-
-        generator = torch.manual_seed(seed)
-        for i in range(batch_index):
-            noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
-        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
-
-    if "noise_mask" in latent:
-        noise_mask = latent['noise_mask']
-        noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear")
-        noise_mask = noise_mask.round()
-        noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1)
-        noise_mask = torch.cat([noise_mask] * noise.shape[0])
-        noise_mask = noise_mask.to(device)
+    noise = comfy.sample.prepare_noise(latent, seed, disable_noise)
+    noise_mask = comfy.sample.create_mask(latent, noise)
 
     real_model = None
     comfy.model_management.load_model_gpu(model)
@@ -772,34 +754,10 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
     noise = noise.to(device)
     latent_image = latent_image.to(device)
 
-    positive_copy = []
-    negative_copy = []
+    positive_copy = comfy.sample.broadcast_cond(positive, noise)
+    negative_copy = comfy.sample.broadcast_cond(negative, noise)
 
-    control_nets = []
-    def get_models(cond):
-        models = []
-        for c in cond:
-            if 'control' in c[1]:
-                models += [c[1]['control']]
-            if 'gligen' in c[1]:
-                models += [c[1]['gligen'][1]]
-        return models
-
-    for p in positive:
-        t = p[0]
-        if t.shape[0] < noise.shape[0]:
-            t = torch.cat([t] * noise.shape[0])
-        t = t.to(device)
-        positive_copy += [[t] + p[1:]]
-    for n in negative:
-        t = n[0]
-        if t.shape[0] < noise.shape[0]:
-            t = torch.cat([t] * noise.shape[0])
-        t = t.to(device)
-        negative_copy += [[t] + n[1:]]
-
-    models = get_models(positive) + get_models(negative)
-    comfy.model_management.load_controlnet_gpu(models)
+    models = comfy.sample.load_additional_models(positive, negative)
 
     if sampler_name in comfy.samplers.KSampler.SAMPLERS:
         sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
@@ -809,8 +767,8 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
     samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
     samples = samples.cpu()
 
-    for m in models:
-        m.cleanup()
+
+    comfy.sample.cleanup_additional_models(models)
 
     out = latent.copy()
     out["samples"] = samples

From 5818539743bd390a282a19d7e480177c31bc222b Mon Sep 17 00:00:00 2001
From: BlenderNeko <126974546+BlenderNeko@users.noreply.github.com>
Date: Sun, 23 Apr 2023 20:09:09 +0200
Subject: [PATCH 13/56] add docstrings

---
 comfy/sample.py | 25 ++++++++++++++-----------
 nodes.py        |  6 +++++-
 2 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/comfy/sample.py b/comfy/sample.py
index ede89890b..981781b53 100644
--- a/comfy/sample.py
+++ b/comfy/sample.py
@@ -2,22 +2,21 @@ import torch
 import comfy.model_management
 
 
-def prepare_noise(latent, seed, disable_noise):
+def prepare_noise(latent, seed):
+    """creates random noise given a LATENT and a seed"""
     latent_image = latent["samples"]
-    if disable_noise:
-        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
-    else:
-        batch_index = 0
-        if "batch_index" in latent:
-            batch_index = latent["batch_index"]
+    batch_index = 0
+    if "batch_index" in latent:
+        batch_index = latent["batch_index"]
 
-        generator = torch.manual_seed(seed)
-        for i in range(batch_index):
-            noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
-        noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
+    generator = torch.manual_seed(seed)
+    for i in range(batch_index):
+        noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
+    noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
     return noise
 
 def create_mask(latent, noise):
+    """creates a mask for a given LATENT and noise"""
     noise_mask = None
     device = comfy.model_management.get_torch_device()
     if "noise_mask" in latent:
     return noise_mask
 
 def broadcast_cond(cond, noise):
+    """broadcasts conditioning to the noise batch size"""
     device = comfy.model_management.get_torch_device()
     copy = []
     for p in cond:
     return copy
 
 def load_c_nets(positive, negative):
+    """loads control nets in positive and negative conditioning"""
     def get_models(cond):
         models = []
         for c in cond:
     return get_models(positive) + get_models(negative)
 
 def load_additional_models(positive, negative):
+    """loads additional models in positive and negative conditioning"""
     models = load_c_nets(positive, negative)
     comfy.model_management.load_controlnet_gpu(models)
     return models
 
 def cleanup_additional_models(models):
+    """cleanup additional models that were loaded"""
     for m in models:
         m.cleanup()
\ No newline at end of file
diff --git a/nodes.py b/nodes.py
index a70668fd7..b8c6d350f 100644
--- a/nodes.py
+++ b/nodes.py
@@ -744,7 +744,11 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
     device = comfy.model_management.get_torch_device()
     latent_image = latent["samples"]
 
-    noise = comfy.sample.prepare_noise(latent, seed, disable_noise)
+    if disable_noise:
+        noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+    else:
+        noise = comfy.sample.prepare_noise(latent, seed)
+
     noise_mask = comfy.sample.create_mask(latent, noise)
 
     real_model = None

From f7a821881476cbd52a513877a9ffe35e6702b850 Mon Sep 17 00:00:00 2001
From: ltdrdata <128333288+ltdrdata@users.noreply.github.com>
Date: Mon, 24 Apr 2023 04:58:55 +0900
Subject: [PATCH 14/56] Add clipspace feature. (#541)

* Add clipspace feature.

* feat: copy content to clipspace
* feat: paste content from clipspace

Extend validation to allow for validating annotated_path in addition to
other parameters.
Add support for annotated_filepath in folder_paths function.
Generalize the '/upload/image' API to allow for uploading images to the
'input', 'temp', or 'output' directories.

* rename contentClipboard -> clipspace

* Do deep copy for imgs on copy to clipspace.

* add original_imgs into clipspace

* Preserve the original image when 'imgs' are modified

* robust patch & refactoring folder_paths about annotated_filepath

* Only show the Paste menu if the ComfyApp.clipspace is not empty

* instant refresh on paste

force triggering 'changed' on paste action

* subfolder fix on paste logic

attach subfolder if subfolder isn't empty

---------

Co-authored-by: Lt.Dr.Data
---
 execution.py       |  8 ++++-
 folder_paths.py    | 40 ++++++++++++++++++++++
 nodes.py           |  8 ++---
 server.py          | 15 ++++++---
 web/scripts/app.js | 83 ++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 145 insertions(+), 9 deletions(-)

diff --git a/execution.py b/execution.py
index 73be6db03..b062deeb1 100644
--- a/execution.py
+++ b/execution.py
@@ -11,6 +11,7 @@ import torch
 
 import nodes
 import comfy.model_management
+import folder_paths
 
 def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
@@ -250,7 +251,12 @@ def validate_inputs(prompt, item):
                     return (False, "Value bigger than max. {}, {}".format(class_type, x))
 
             if isinstance(type_input, list):
-                if val not in type_input:
+                is_annotated_path = val.endswith("[temp]") or val.endswith("[input]") or val.endswith("[output]")
+                if is_annotated_path:
+                    if not folder_paths.exists_annotated_filepath(val):
+                        return (False, "Invalid file path. {}, {}: {}".format(class_type, x, val))
+
+                elif val not in type_input:
                     return (False, "Value not in list. {}, {}: {} not in {}".format(class_type, x, val, type_input))
     return (True, "")
 
diff --git a/folder_paths.py b/folder_paths.py
index bb0d65524..99a016695 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -69,6 +69,46 @@ def get_directory_by_type(type_name):
     return None
 
 
+# determine base_dir rely on annotation if name is 'filename.ext [annotation]' format
+# otherwise use default_path as base_dir
+def touch_annotated_filepath(name):
+    if name.endswith("[output]"):
+        base_dir = get_output_directory()
+        name = name[:-9]
+    elif name.endswith("[input]"):
+        base_dir = get_input_directory()
+        name = name[:-8]
+    elif name.endswith("[temp]"):
+        base_dir = get_temp_directory()
+        name = name[:-7]
+    else:
+        return name, None
+
+    return name, base_dir
+
+
+def get_annotated_filepath(name, default_dir=None):
+    name, base_dir = touch_annotated_filepath(name)
+
+    if base_dir is None:
+        if default_dir is not None:
+            base_dir = default_dir
+        else:
+            base_dir = get_input_directory()  # fallback path
+
+    return os.path.join(base_dir, name)
+
+
+def exists_annotated_filepath(name):
+    name, base_dir = touch_annotated_filepath(name)
+
+    if base_dir is None:
+        base_dir = get_input_directory()  # fallback path
+
+    filepath = os.path.join(base_dir, name)
+    return os.path.exists(filepath)
+
+
 def add_model_folder_path(folder_name, full_folder_path):
     global folder_names_and_paths
     if folder_name in folder_names_and_paths:
diff --git a/nodes.py b/nodes.py
index 6ca73fa0c..b8b6280d6 100644
--- a/nodes.py
+++ b/nodes.py
@@ -975,7 +975,7 @@ class LoadImage:
     FUNCTION = "load_image"
     def load_image(self, image):
         input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image, input_dir)
         i = Image.open(image_path)
         image = i.convert("RGB")
         image = np.array(image).astype(np.float32) / 255.0
@@ -990,7 +990,7 @@ class LoadImage:
     @classmethod
     def IS_CHANGED(s, image):
         input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image, input_dir)
         m = hashlib.sha256()
         with open(image_path, 'rb') as f:
             m.update(f.read())
@@ -1011,7 +1011,7 @@ class LoadImageMask:
     FUNCTION = "load_image"
     def load_image(self, image, channel):
         input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image, input_dir)
         i = Image.open(image_path)
         if i.getbands() != ("R", "G", "B", "A"):
             i = i.convert("RGBA")
@@ -1029,7 +1029,7 @@ class LoadImageMask:
     @classmethod
     def IS_CHANGED(s, image, channel):
         input_dir = folder_paths.get_input_directory()
-        image_path = os.path.join(input_dir, image)
+        image_path = folder_paths.get_annotated_filepath(image, input_dir)
         m = hashlib.sha256()
         with open(image_path, 'rb') as f:
             m.update(f.read())
diff --git a/server.py b/server.py
index b5403670f..1c5c17916 100644
--- a/server.py
+++ b/server.py
@@ -112,13 +112,20 @@ class PromptServer():
 
         @routes.post("/upload/image")
         async def upload_image(request):
-            upload_dir = folder_paths.get_input_directory()
+            post = await request.post()
+            image = post.get("image")
+
+            if post.get("type") is None:
+                upload_dir = folder_paths.get_input_directory()
+            elif post.get("type") == "input":
+                upload_dir = folder_paths.get_input_directory()
+            elif post.get("type") == "temp":
+                upload_dir = folder_paths.get_temp_directory()
+            elif post.get("type") == "output":
+                upload_dir = folder_paths.get_output_directory()
 
             if not os.path.exists(upload_dir):
                 os.makedirs(upload_dir)
-
-            post = await request.post()
-            image = post.get("image")
 
             if image and image.file:
                 filename = image.filename
diff --git a/web/scripts/app.js b/web/scripts/app.js
index f158f3457..b3e88d46f 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -20,6 +20,12 @@ export class ComfyApp {
      */
     #processingQueue = false;
 
+    /**
+     * Content Clipboard
+     * @type {serialized node object}
+     */
+    static clipspace = null;
+
     constructor() {
         this.ui = new ComfyUI(this);
 
@@ -130,6 +136,83 @@ export class ComfyApp {
                 );
             }
         }
+
+        options.push(
+            {
+                content: "Copy (Clipspace)",
+                callback: (obj) => {
+                    var widgets = null;
+                    if(this.widgets) {
+                        widgets = this.widgets.map(({ type, name, value }) => ({ type, name, value }));
+                    }
+
+                    let img = new Image();
+                    var imgs = undefined;
+                    if(this.imgs != undefined) {
+                        img.src = this.imgs[0].src;
+                        imgs = [img];
+                    }
+
+                    ComfyApp.clipspace = {
+                        'widgets': widgets,
+                        'imgs': imgs,
+                        'original_imgs': imgs,
+                        'images': this.images
+                    };
+                }
+            });
+
+        if(ComfyApp.clipspace != null) {
+            options.push(
+                {
+                    content: "Paste (Clipspace)",
+                    callback: () => {
+                        if(ComfyApp.clipspace != null) {
+                            if(ComfyApp.clipspace.widgets != null && this.widgets != null) {
+                                ComfyApp.clipspace.widgets.forEach(({ type, name, value }) => {
+                                    const prop = Object.values(this.widgets).find(obj => obj.type === type && obj.name === name);
+                                    if (prop) {
+                                        prop.value = value;
+                                    }
+                                });
+                            }
+
+                            // image paste
+                            if(ComfyApp.clipspace.imgs != undefined && this.imgs != undefined && this.widgets != null) {
+                                var filename = "";
+                                if(this.images && ComfyApp.clipspace.images) {
+                                    this.images = ComfyApp.clipspace.images;
+                                }
+
+                                if(ComfyApp.clipspace.images != undefined) {
+                                    const clip_image = ComfyApp.clipspace.images[0];
+                                    if(clip_image.subfolder != '')
+                                        filename = `${clip_image.subfolder}/`;
+                                    filename += `${clip_image.filename} [${clip_image.type}]`;
+                                }
+                                else if(ComfyApp.clipspace.widgets != undefined) {
+                                    const index_in_clip = ComfyApp.clipspace.widgets.findIndex(obj => obj.name === 'image');
+                                    if(index_in_clip >= 0) {
+                                        filename = `${ComfyApp.clipspace.widgets[index_in_clip].value}`;
+                                    }
+                                }
+
+                                const index = this.widgets.findIndex(obj => obj.name === 'image');
+                                if(index >= 0 && filename != "" && ComfyApp.clipspace.imgs != undefined) {
+                                    this.imgs = ComfyApp.clipspace.imgs;
+
+                                    this.widgets[index].value = filename;
+                                    if(this.widgets_values != undefined) {
+                                        this.widgets_values[index] = filename;
+                                    }
+                                }
+                            }
+                            this.trigger('changed');
+                        }
+                    }
+                }
+            );
+        }
     };
 }

From ccad603b2e6862a4a719bc34dc6bd32e65a539ad Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 23 Apr 2023 16:03:26 -0400
Subject: [PATCH 15/56] Add a way for nodes to validate their own inputs.
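With this change, any node class can declare a VALIDATE_INPUTS classmethod;
during prompt validation the executor calls it with the node's resolved
inputs and treats any return value other than True as an error message. A
hypothetical custom node using the hook might look like this (the class name
and its input are invented for illustration):

    import os
    import folder_paths

    class ExampleImageNode:
        @classmethod
        def INPUT_TYPES(s):
            input_dir = folder_paths.get_input_directory()
            return {"required": {"image": (sorted(os.listdir(input_dir)), )}}

        @classmethod
        def VALIDATE_INPUTS(s, image):
            # Return True when the value is acceptable, or a string describing
            # the problem, which the executor reports as "<class_type>, <message>".
            if not folder_paths.exists_annotated_filepath(image):
                return "Invalid image file: {}".format(image)
            return True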
---
 execution.py       | 21 +++++++++++----------
 folder_paths.py    |  6 +++---
 nodes.py           | 32 +++++++++++++++++++++---------
 web/scripts/app.js |  2 +-
 4 files changed, 38 insertions(+), 23 deletions(-)

diff --git a/execution.py b/execution.py
index b062deeb1..115efcbda 100644
--- a/execution.py
+++ b/execution.py
@@ -11,7 +11,6 @@ import torch
 
 import nodes
 import comfy.model_management
-import folder_paths
 
 def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
     valid_inputs = class_def.INPUT_TYPES()
@@ -250,14 +249,15 @@ def validate_inputs(prompt, item):
                 if "max" in info[1] and val > info[1]["max"]:
                     return (False, "Value bigger than max. {}, {}".format(class_type, x))
 
-            if isinstance(type_input, list):
-                is_annotated_path = val.endswith("[temp]") or val.endswith("[input]") or val.endswith("[output]")
-                if is_annotated_path:
-                    if not folder_paths.exists_annotated_filepath(val):
-                        return (False, "Invalid file path. {}, {}: {}".format(class_type, x, val))
-
-                elif val not in type_input:
-                    return (False, "Value not in list. {}, {}: {} not in {}".format(class_type, x, val, type_input))
+            if hasattr(obj_class, "VALIDATE_INPUTS"):
+                input_data_all = get_input_data(inputs, obj_class, unique_id)
+                ret = obj_class.VALIDATE_INPUTS(**input_data_all)
+                if ret != True:
+                    return (False, "{}, {}".format(class_type, ret))
+            else:
+                if isinstance(type_input, list):
+                    if val not in type_input:
+                        return (False, "Value not in list. {}, {}: {} not in {}".format(class_type, x, val, type_input))
     return (True, "")
 
 def validate_prompt(prompt):
@@ -279,7 +279,8 @@ def validate_prompt(prompt):
             m = validate_inputs(prompt, o)
             valid = m[0]
             reason = m[1]
-        except:
+        except Exception as e:
+            print(traceback.format_exc())
             valid = False
             reason = "Parsing error"
 
diff --git a/folder_paths.py b/folder_paths.py
index 99a016695..e5b89492c 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -71,7 +71,7 @@ def get_directory_by_type(type_name):
 
 # determine base_dir rely on annotation if name is 'filename.ext [annotation]' format
 # otherwise use default_path as base_dir
-def touch_annotated_filepath(name):
+def annotated_filepath(name):
     if name.endswith("[output]"):
         base_dir = get_output_directory()
         name = name[:-9]
@@ -88,7 +88,7 @@ def get_annotated_filepath(name, default_dir=None):
-    name, base_dir = touch_annotated_filepath(name)
+    name, base_dir = annotated_filepath(name)
 
     if base_dir is None:
         if default_dir is not None:
@@ -100,7 +100,7 @@ def exists_annotated_filepath(name):
-    name, base_dir = touch_annotated_filepath(name)
+    name, base_dir = annotated_filepath(name)
 
     if base_dir is None:
         base_dir = get_input_directory()  # fallback path
 
diff --git a/nodes.py b/nodes.py
index b8b6280d6..d1133d1d8 100644
--- a/nodes.py
+++ b/nodes.py
@@ -974,8 +974,7 @@ class LoadImage:
     RETURN_TYPES = ("IMAGE", "MASK")
     FUNCTION = "load_image"
     def load_image(self, image):
-        input_dir = folder_paths.get_input_directory()
-        image_path = folder_paths.get_annotated_filepath(image, input_dir)
+        image_path = folder_paths.get_annotated_filepath(image)
         i = Image.open(image_path)
         image = i.convert("RGB")
         image = np.array(image).astype(np.float32) / 255.0
@@ -989,20 +988,27 @@ class LoadImage:
 
     @classmethod
     def IS_CHANGED(s, image):
-        input_dir = folder_paths.get_input_directory()
-        image_path = folder_paths.get_annotated_filepath(image, input_dir)
+        image_path = folder_paths.get_annotated_filepath(image)
         m = hashlib.sha256()
         with open(image_path, 'rb') as f:
             m.update(f.read())
         return m.digest().hex()
 
+    @classmethod
+    def VALIDATE_INPUTS(s, image):
+        if not folder_paths.exists_annotated_filepath(image):
+            return "Invalid image file: {}".format(image)
+
+        return True
+
 class LoadImageMask:
+    _color_channels = ["alpha", "red", "green", "blue"]
     @classmethod
     def INPUT_TYPES(s):
         input_dir = folder_paths.get_input_directory()
         return {"required": {"image": (sorted(os.listdir(input_dir)), ),
-                             "channel": (["alpha", "red", "green", "blue"], ),}
+                             "channel": (s._color_channels, ),}
                 }
 
     CATEGORY = "mask"
 
     RETURN_TYPES = ("MASK",)
     FUNCTION = "load_image"
     def load_image(self, image, channel):
-        input_dir = folder_paths.get_input_directory()
-        image_path = folder_paths.get_annotated_filepath(image, input_dir)
+        image_path = folder_paths.get_annotated_filepath(image)
         i = Image.open(image_path)
         if i.getbands() != ("R", "G", "B", "A"):
             i = i.convert("RGBA")
@@ -1028,13 +1033,22 @@ class LoadImageMask:
 
     @classmethod
     def IS_CHANGED(s, image, channel):
-        input_dir = folder_paths.get_input_directory()
-        image_path = folder_paths.get_annotated_filepath(image, input_dir)
+        image_path = folder_paths.get_annotated_filepath(image)
         m = hashlib.sha256()
         with open(image_path, 'rb') as f:
             m.update(f.read())
         return m.digest().hex()
 
+    @classmethod
+    def VALIDATE_INPUTS(s, image, channel):
+        if not folder_paths.exists_annotated_filepath(image):
+            return "Invalid image file: {}".format(image)
+
+        if channel not in s._color_channels:
+            return "Invalid color channel: {}".format(channel)
+
+        return True
+
 class ImageScale:
     upscale_methods = ["nearest-exact", "bilinear", "area"]
     crop_methods = ["disabled", "center"]
diff --git a/web/scripts/app.js b/web/scripts/app.js
index b3e88d46f..a161bf40e 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -172,7 +172,7 @@ export class ComfyApp {
                                 ComfyApp.clipspace.widgets.forEach(({ type, name, value }) => {
                                     const prop = Object.values(this.widgets).find(obj => obj.type === type && obj.name === name);
                                     if (prop) {
-                                        prop.value = value;
+                                        prop.callback(value);
                                     }
                                 });
                             }

From 0ac319fd81bcecea2aa35743da28088832e44707 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 23 Apr 2023 22:44:38 -0400
Subject: [PATCH 16/56] Don't delete all outputs when execution gets
 interrupted.
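The fix below threads a single shared `executed` set through
recursive_execute instead of rebuilding a list from return values, so nodes
that finished before an interruption keep their cached outputs. A simplified
sketch of the new control flow (dependencies() and run_node() are
hypothetical stand-ins for the real lookup and execution code):

    def recursive_execute(prompt, outputs, node_id, executed):
        if node_id in outputs:
            return
        for dep_id in dependencies(prompt, node_id):   # hypothetical helper
            recursive_execute(prompt, outputs, dep_id, executed)
        outputs[node_id] = run_node(prompt, node_id)   # hypothetical helper
        executed.add(node_id)  # recorded even if a later sibling raises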
--- execution.py | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/execution.py b/execution.py index 115efcbda..31a208e78 100644 --- a/execution.py +++ b/execution.py @@ -40,15 +40,13 @@ def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_da input_data_all[x] = unique_id return input_data_all -def recursive_execute(server, prompt, outputs, current_item, extra_data={}): +def recursive_execute(server, prompt, outputs, current_item, extra_data, executed): unique_id = current_item inputs = prompt[unique_id]['inputs'] class_type = prompt[unique_id]['class_type'] class_def = nodes.NODE_CLASS_MAPPINGS[class_type] if unique_id in outputs: - return [] - - executed = [] + return for x in inputs: input_data = inputs[x] @@ -57,7 +55,7 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data={}): input_unique_id = input_data[0] output_index = input_data[1] if input_unique_id not in outputs: - executed += recursive_execute(server, prompt, outputs, input_unique_id, extra_data) + recursive_execute(server, prompt, outputs, input_unique_id, extra_data, executed) input_data_all = get_input_data(inputs, class_def, unique_id, outputs, prompt, extra_data) if server.client_id is not None: @@ -72,7 +70,7 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data={}): server.send_sync("executed", { "node": unique_id, "output": outputs[unique_id]["ui"] }, server.client_id) if "result" in outputs[unique_id]: outputs[unique_id] = outputs[unique_id]["result"] - return executed + [unique_id] + executed.add(unique_id) def recursive_will_execute(prompt, outputs, current_item): unique_id = current_item @@ -158,7 +156,7 @@ class PromptExecutor: recursive_output_delete_if_changed(prompt, self.old_prompt, self.outputs, x) current_outputs = set(self.outputs.keys()) - executed = [] + executed = set() try: to_execute = [] for x in prompt: @@ -181,12 +179,12 @@ class PromptExecutor: except: valid = False if valid: - executed += recursive_execute(self.server, prompt, self.outputs, x, extra_data) + recursive_execute(self.server, prompt, self.outputs, x, extra_data, executed) except Exception as e: print(traceback.format_exc()) to_delete = [] for o in self.outputs: - if o not in current_outputs: + if (o not in current_outputs) and (o not in executed): to_delete += [o] if o in self.old_prompt: d = self.old_prompt.pop(o) @@ -194,11 +192,9 @@ class PromptExecutor: for o in to_delete: d = self.outputs.pop(o) del d - else: - executed = set(executed) + finally: for x in executed: self.old_prompt[x] = copy.deepcopy(prompt[x]) - finally: self.server.last_node_id = None if self.server.client_id is not None: self.server.send_sync("executing", { "node": None }, self.server.client_id) From f1b87f50fa9c274f2dd9dbe24b082aa83ef0b028 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Apr 2023 01:50:56 -0400 Subject: [PATCH 17/56] Add hypernetworks path config to extra_model_paths.yaml.example --- extra_model_paths.yaml.example | 1 + 1 file changed, 1 insertion(+) diff --git a/extra_model_paths.yaml.example b/extra_model_paths.yaml.example index ac1ffe9d2..fa5418a68 100644 --- a/extra_model_paths.yaml.example +++ b/extra_model_paths.yaml.example @@ -13,6 +13,7 @@ a111: models/ESRGAN models/SwinIR embeddings: embeddings + hypernetworks: models/hypernetworks controlnet: models/ControlNet #other_ui: From 4e345b31f692d5fb89009bf3352c922c2abe30e2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Apr 2023 02:36:06 -0400 Subject: [PATCH 
18/56] Support all known hypernetworks. --- comfy_extras/nodes_hypernetwork.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index db2f8695c..c08c2c811 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -10,7 +10,17 @@ def load_hypernetwork_patch(path, strength): activate_output = sd.get('activate_output', False) last_layer_dropout = sd.get('last_layer_dropout', False) - if activation_func != 'linear' or is_layer_norm != False or use_dropout != False or activate_output != False or last_layer_dropout != False: + valid_activation = { + "linear": torch.nn.Identity, + "relu": torch.nn.ReLU, + "leakyrelu": torch.nn.LeakyReLU, + "elu": torch.nn.ELU, + "swish": torch.nn.Hardswish, + "tanh": torch.nn.Tanh, + "sigmoid": torch.nn.Sigmoid, + } + + if activation_func not in valid_activation: print("Unsupported Hypernetwork format, if you report it I might implement it.", path, " ", activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout) return None @@ -28,15 +38,27 @@ def load_hypernetwork_patch(path, strength): keys = attn_weights.keys() linears = filter(lambda a: a.endswith(".weight"), keys) - linears = sorted(list(map(lambda a: a[:-len(".weight")], linears))) + linears = list(map(lambda a: a[:-len(".weight")], linears)) layers = [] - for lin_name in linears: + for i in range(len(linears)): + lin_name = linears[i] + last_layer = (i == (len(linears) - 1)) + penultimate_layer = (i == (len(linears) - 2)) + lin_weight = attn_weights['{}.weight'.format(lin_name)] lin_bias = attn_weights['{}.bias'.format(lin_name)] layer = torch.nn.Linear(lin_weight.shape[1], lin_weight.shape[0]) layer.load_state_dict({"weight": lin_weight, "bias": lin_bias}) - layers += [layer] + layers.append(layer) + if activation_func != "linear": + if (not last_layer) or (activate_output): + layers.append(valid_activation[activation_func]()) + if is_layer_norm: + layers.append(torch.nn.LayerNorm(lin_weight.shape[0])) + if use_dropout: + if (not last_layer) and (not penultimate_layer or last_layer_dropout): + layers.append(torch.nn.Dropout(p=0.3)) output.append(torch.nn.Sequential(*layers)) out[dim] = torch.nn.ModuleList(output) From 463bde66a1d22b02858ac6f148d7fa3e6d9c4322 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Apr 2023 03:08:51 -0400 Subject: [PATCH 19/56] Add hypernetwork example link to readme. Move hypernetwork loader node to loaders. --- README.md | 1 + comfy_extras/nodes_hypernetwork.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index bf16006bf..5b6346a67 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin - Can load ckpt, safetensors and diffusers models/checkpoints. Standalone VAEs and CLIP models. - Embeddings/Textual inversion - [Loras (regular, locon and loha)](https://comfyanonymous.github.io/ComfyUI_examples/lora/) +- [Hypernetworks](https://comfyanonymous.github.io/ComfyUI_examples/hypernetworks/) - Loading full workflows (with seeds) from generated PNG files. - Saving/Loading workflows as Json files. - Nodes interface can be used to create complex workflows like one for [Hires fix](https://comfyanonymous.github.io/ComfyUI_examples/2_pass_txt2img/) or much more advanced ones. 
diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index c08c2c811..0c7250e43 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -93,7 +93,7 @@ class HypernetworkLoader: RETURN_TYPES = ("MODEL",) FUNCTION = "load_hypernetwork" - CATEGORY = "_for_testing" + CATEGORY = "loaders" def load_hypernetwork(self, model, hypernetwork_name, strength): hypernetwork_path = folder_paths.get_full_path("hypernetworks", hypernetwork_name) From d9b1595f8552384dd08374d34c4d4127e0b1a4e6 Mon Sep 17 00:00:00 2001 From: BlenderNeko <126974546+BlenderNeko@users.noreply.github.com> Date: Mon, 24 Apr 2023 12:53:10 +0200 Subject: [PATCH 20/56] made sample functions more explicit --- comfy/sample.py | 55 +++++++++++++++++++++---------------------------- nodes.py | 7 +++++-- 2 files changed, 29 insertions(+), 33 deletions(-) diff --git a/comfy/sample.py b/comfy/sample.py index 981781b53..84eefcb7b 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -2,30 +2,25 @@ import torch import comfy.model_management -def prepare_noise(latent, seed): - """creates random noise given a LATENT and a seed""" - latent_image = latent["samples"] - batch_index = 0 - if "batch_index" in latent: - batch_index = latent["batch_index"] - +def prepare_noise(latent_image, seed, skip=0): + """ + creates random noise given a latent image and a seed. + optional arg skip can be used to skip and discard x number of noise generations for a given seed + """ generator = torch.manual_seed(seed) - for i in range(batch_index): + for _ in range(skip): noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu") noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu") return noise -def create_mask(latent, noise): - """creates a mask for a given LATENT and noise""" - noise_mask = None +def prepare_mask(noise_mask, noise): + """ensures noise mask is of proper dimensions""" device = comfy.model_management.get_torch_device() - if "noise_mask" in latent: - noise_mask = latent['noise_mask'] - noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear") - noise_mask = noise_mask.round() - noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1) - noise_mask = torch.cat([noise_mask] * noise.shape[0]) - noise_mask = noise_mask.to(device) + noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear") + noise_mask = noise_mask.round() + noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1) + noise_mask = torch.cat([noise_mask] * noise.shape[0]) + noise_mask = noise_mask.to(device) return noise_mask def broadcast_cond(cond, noise): @@ -40,22 +35,20 @@ def broadcast_cond(cond, noise): copy += [[t] + p[1:]] return copy -def load_c_nets(positive, negative): - """loads control nets in positive and negative conditioning""" - def get_models(cond): - models = [] - for c in cond: - if 'control' in c[1]: - models += [c[1]['control']] - if 'gligen' in c[1]: - models += [c[1]['gligen'][1]] - return models - - return get_models(positive) + get_models(negative) +def get_models_from_cond(cond, model_type): + models = [] + for c in cond: + if model_type in c[1]: + models += [c[1][model_type]] + return models def load_additional_models(positive, negative): """loads additional models in positive and negative 
conditioning""" - models = load_c_nets(positive, negative) + models = [] + models += get_models_from_cond(positive, "control") + models += get_models_from_cond(negative, "control") + models += get_models_from_cond(positive, "gligen") + models += get_models_from_cond(negative, "gligen") comfy.model_management.load_controlnet_gpu(models) return models diff --git a/nodes.py b/nodes.py index b8c6d350f..f9bedc97e 100644 --- a/nodes.py +++ b/nodes.py @@ -747,9 +747,12 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: - noise = comfy.sample.prepare_noise(latent, seed) + skip = latent["batch_index"] if "batch_index" in latent else 0 + noise = comfy.sample.prepare_noise(latent_image, seed, skip) - noise_mask = comfy.sample.create_mask(latent, noise) + noise_mask = None + if "noise_mask" in latent: + noise_mask = comfy.sample.prepare_mask(latent["noise_mask"], noise) real_model = None comfy.model_management.load_model_gpu(model) From 0b07b2cc0f94fc2b8ebe656dfb3768c6f67866f1 Mon Sep 17 00:00:00 2001 From: BlenderNeko <126974546+BlenderNeko@users.noreply.github.com> Date: Mon, 24 Apr 2023 21:47:57 +0200 Subject: [PATCH 21/56] gligen tuple --- comfy/sample.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/comfy/sample.py b/comfy/sample.py index 84eefcb7b..09ab20cd2 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -44,11 +44,10 @@ def get_models_from_cond(cond, model_type): def load_additional_models(positive, negative): """loads additional models in positive and negative conditioning""" - models = [] - models += get_models_from_cond(positive, "control") - models += get_models_from_cond(negative, "control") - models += get_models_from_cond(positive, "gligen") - models += get_models_from_cond(negative, "gligen") + control_nets = get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control") + gligen = get_models_from_cond(positive, "gligen") + get_models_from_cond(negative, "gligen") + gligen = [x[1] for x in gligen] + models = control_nets + gligen comfy.model_management.load_controlnet_gpu(models) return models From 36acce58e71bbe1bf835c2ec380dc7ac0c5b4752 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Apr 2023 18:13:18 -0400 Subject: [PATCH 22/56] Auto increase the size of the image upload widget when there's an image. --- web/scripts/widgets.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 2acc5f2c0..238ad59dd 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -270,6 +270,9 @@ export const ComfyWidgets = { app.graph.setDirtyCanvas(true); }; img.src = `/view?filename=${name}&type=input`; + if ((node.size[1] - node.imageOffset) < 100) { + node.size[1] = 250 + node.imageOffset; + } } // Add our own callback to the combo widget to render an image when it changes From 7983b3a975c26b93601c8b6fa9a0a333b35794bd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Apr 2023 22:45:35 -0400 Subject: [PATCH 23/56] This is cleaner this way. 
--- comfy/samplers.py | 59 ++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 46bdb82a0..26597ebba 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -400,38 +400,6 @@ def encode_adm(noise_augmentor, conds, batch_size, device): return conds -def calculate_sigmas(model, steps, scheduler, sampler): - """ - Returns a tensor containing the sigmas corresponding to the given model, number of steps, scheduler type and sample technique - """ - if not (isinstance(model, CompVisVDenoiser) or isinstance(model, k_diffusion_external.CompVisDenoiser)): - model = CFGNoisePredictor(model) - if model.inner_model.parameterization == "v": - model = CompVisVDenoiser(model, quantize=True) - else: - model = k_diffusion_external.CompVisDenoiser(model, quantize=True) - - sigmas = None - - discard_penultimate_sigma = False - if sampler in ['dpm_2', 'dpm_2_ancestral']: - steps += 1 - discard_penultimate_sigma = True - - if scheduler == "karras": - sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model.sigma_min), sigma_max=float(model.sigma_max)) - elif scheduler == "normal": - sigmas = model.get_sigmas(steps) - elif scheduler == "simple": - sigmas = simple_scheduler(model, steps) - elif scheduler == "ddim_uniform": - sigmas = ddim_scheduler(model, steps) - else: - print("error invalid scheduler", scheduler) - - if discard_penultimate_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - return sigmas class KSampler: SCHEDULERS = ["karras", "normal", "simple", "ddim_uniform"] @@ -461,13 +429,36 @@ class KSampler: self.denoise = denoise self.model_options = model_options + def calculate_sigmas(self, steps): + sigmas = None + + discard_penultimate_sigma = False + if self.sampler in ['dpm_2', 'dpm_2_ancestral']: + steps += 1 + discard_penultimate_sigma = True + + if self.scheduler == "karras": + sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max) + elif self.scheduler == "normal": + sigmas = self.model_wrap.get_sigmas(steps) + elif self.scheduler == "simple": + sigmas = simple_scheduler(self.model_wrap, steps) + elif self.scheduler == "ddim_uniform": + sigmas = ddim_scheduler(self.model_wrap, steps) + else: + print("error invalid scheduler", self.scheduler) + + if discard_penultimate_sigma: + sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) + return sigmas + def set_steps(self, steps, denoise=None): self.steps = steps if denoise is None or denoise > 0.9999: - self.sigmas = calculate_sigmas(self.model_wrap, steps, self.scheduler, self.sampler).to(self.device) + self.sigmas = self.calculate_sigmas(steps).to(self.device) else: new_steps = int(steps/denoise) - sigmas = calculate_sigmas(self.model_wrap, new_steps, self.scheduler, self.sampler).to(self.device) + sigmas = self.calculate_sigmas(new_steps).to(self.device) self.sigmas = sigmas[-(steps + 1):] From c50208a703c6eba2363b08c4cb62e903a3012710 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Apr 2023 23:25:51 -0400 Subject: [PATCH 24/56] Refactor more code to sample.py --- comfy/sample.py | 47 ++++++++++++++++++++++++++++++++++++----------- nodes.py | 28 ++++------------------------ 2 files changed, 40 insertions(+), 35 deletions(-) diff --git a/comfy/sample.py b/comfy/sample.py index 09ab20cd2..d6848f9d5 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -1,5 +1,6 @@ import torch import comfy.model_management +import comfy.samplers def prepare_noise(latent_image, 
seed, skip=0): @@ -13,24 +14,22 @@ def prepare_noise(latent_image, seed, skip=0): noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu") return noise -def prepare_mask(noise_mask, noise): +def prepare_mask(noise_mask, shape, device): """ensures noise mask is of proper dimensions""" - device = comfy.model_management.get_torch_device() - noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(noise.shape[2], noise.shape[3]), mode="bilinear") + noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(shape[2], shape[3]), mode="bilinear") noise_mask = noise_mask.round() - noise_mask = torch.cat([noise_mask] * noise.shape[1], dim=1) - noise_mask = torch.cat([noise_mask] * noise.shape[0]) + noise_mask = torch.cat([noise_mask] * shape[1], dim=1) + noise_mask = torch.cat([noise_mask] * shape[0]) noise_mask = noise_mask.to(device) return noise_mask -def broadcast_cond(cond, noise): - """broadcasts conditioning to the noise batch size""" - device = comfy.model_management.get_torch_device() +def broadcast_cond(cond, batch, device): + """broadcasts conditioning to the batch size""" copy = [] for p in cond: t = p[0] - if t.shape[0] < noise.shape[0]: - t = torch.cat([t] * noise.shape[0]) + if t.shape[0] < batch: + t = torch.cat([t] * batch) t = t.to(device) copy += [[t] + p[1:]] return copy @@ -54,4 +53,30 @@ def load_additional_models(positive, negative): def cleanup_additional_models(models): """cleanup additional models that were loaded""" for m in models: - m.cleanup() \ No newline at end of file + m.cleanup() + +def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None): + device = comfy.model_management.get_torch_device() + + if noise_mask is not None: + noise_mask = prepare_mask(noise_mask, noise.shape, device) + + real_model = None + comfy.model_management.load_model_gpu(model) + real_model = model.model + + noise = noise.to(device) + latent_image = latent_image.to(device) + + positive_copy = broadcast_cond(positive, noise.shape[0], device) + negative_copy = broadcast_cond(negative, noise.shape[0], device) + + models = load_additional_models(positive, negative) + + sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) + + samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas) + samples = samples.cpu() + + cleanup_additional_models(models) + return samples diff --git a/nodes.py b/nodes.py index f787fcf8a..0083f6ef8 100644 --- a/nodes.py +++ b/nodes.py @@ -752,31 +752,11 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, noise_mask = None if "noise_mask" in latent: - noise_mask = comfy.sample.prepare_mask(latent["noise_mask"], noise) - - real_model = None - comfy.model_management.load_model_gpu(model) - real_model = model.model - - noise = noise.to(device) - latent_image = latent_image.to(device) - - positive_copy = comfy.sample.broadcast_cond(positive, noise) - negative_copy = comfy.sample.broadcast_cond(negative, noise) - - models = comfy.sample.load_additional_models(positive, negative) - - if sampler_name in 
comfy.samplers.KSampler.SAMPLERS: - sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) - else: - #other samplers - pass - - samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask) - samples = samples.cpu() - - comfy.sample.cleanup_additional_models(models) + noise_mask = latent["noise_mask"] + samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, + denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, + force_full_denoise=force_full_denoise, noise_mask=noise_mask) out = latent.copy() out["samples"] = samples return (out, ) From aa57136dae83887e005ab6b0222dce4667b61bee Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Apr 2023 01:12:40 -0400 Subject: [PATCH 25/56] Some fixes to the batch masks PR. --- comfy/sample.py | 7 ++++--- nodes.py | 10 +++------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/comfy/sample.py b/comfy/sample.py index d6848f9d5..5e4d26142 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -1,7 +1,7 @@ import torch import comfy.model_management import comfy.samplers - +import math def prepare_noise(latent_image, seed, skip=0): """ @@ -16,10 +16,11 @@ def prepare_noise(latent_image, seed, skip=0): def prepare_mask(noise_mask, shape, device): """ensures noise mask is of proper dimensions""" - noise_mask = torch.nn.functional.interpolate(noise_mask[None,None,], size=(shape[2], shape[3]), mode="bilinear") + noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear") noise_mask = noise_mask.round() noise_mask = torch.cat([noise_mask] * shape[1], dim=1) - noise_mask = torch.cat([noise_mask] * shape[0]) + if noise_mask.shape[0] < shape[0]: + noise_mask = noise_mask.repeat(math.ceil(shape[0] / noise_mask.shape[0]), 1, 1, 1)[:shape[0]] noise_mask = noise_mask.to(device) return noise_mask diff --git a/nodes.py b/nodes.py index b0b61d676..0a9513bed 100644 --- a/nodes.py +++ b/nodes.py @@ -172,16 +172,12 @@ class VAEEncodeForInpaint: def encode(self, vae, pixels, mask): x = (pixels.shape[1] // 64) * 64 y = (pixels.shape[2] // 64) * 64 - if len(mask.shape) < 3: - mask = mask.unsqueeze(0).unsqueeze(0) - elif len(mask.shape) < 4: - mask = mask.unsqueeze(1) - mask = torch.nn.functional.interpolate(mask, size=(pixels.shape[1], pixels.shape[2]), mode="bilinear") + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear") pixels = pixels.clone() if pixels.shape[1] != x or pixels.shape[2] != y: pixels = pixels[:,:x,:y,:] - mask = mask[:,:x,:y,:] + mask = mask[:,:,:x,:y] #grow mask by a few pixels to keep things seamless in latent space kernel_tensor = torch.ones((1, 1, 6, 6)) @@ -193,7 +189,7 @@ class VAEEncodeForInpaint: pixels[:,:,:,i] += 0.5 t = vae.encode(pixels) - return ({"samples":t, "noise_mask": (mask_erosion[:,:x,:y,:].round())}, ) + return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, ) class CheckpointLoader: @classmethod From 07194297fd41729f8b95352a710b9039ca2c99e8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Apr 2023 14:02:17 -0400 Subject: [PATCH 26/56] Python 
3.7 support. --- comfy_extras/chainner_models/architecture/block.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy_extras/chainner_models/architecture/block.py b/comfy_extras/chainner_models/architecture/block.py index 1abe1ed8f..214642cc4 100644 --- a/comfy_extras/chainner_models/architecture/block.py +++ b/comfy_extras/chainner_models/architecture/block.py @@ -4,7 +4,10 @@ from __future__ import annotations from collections import OrderedDict -from typing import Literal +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal import torch import torch.nn as nn From ee3a12d283d76212f6771a9cace21d4a469c1ee8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Apr 2023 19:18:50 -0400 Subject: [PATCH 27/56] Update litegraph from upstream. --- web/lib/litegraph.core.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 4189a48c0..20ec35476 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -9953,11 +9953,11 @@ LGraphNode.prototype.executeAction = function(action) } break; case "slider": - var range = w.options.max - w.options.min; + var old_value = w.value; var nvalue = Math.clamp((x - 15) / (widget_width - 30), 0, 1); if(w.options.read_only) break; w.value = w.options.min + (w.options.max - w.options.min) * nvalue; - if (w.callback) { + if (old_value != w.value) { setTimeout(function() { inner_value_change(w, w.value); }, 20); @@ -10044,7 +10044,7 @@ LGraphNode.prototype.executeAction = function(action) if (event.click_time < 200 && delta == 0) { this.prompt("Value",w.value,function(v) { // check if v is a valid equation or a number - if (/^[0-9+\-*/()\s]+$/.test(v)) { + if (/^[0-9+\-*/()\s]+|\d+\.\d+$/.test(v)) { try {//solve the equation if possible v = eval(v); } catch (e) { } From 54251ad85e484d4e36df849dcd529837c775d690 Mon Sep 17 00:00:00 2001 From: Jake D <122334950+jwd-dev@users.noreply.github.com> Date: Wed, 26 Apr 2023 01:22:36 -0400 Subject: [PATCH 28/56] Colored MultilineWidget (#524) * fixes colors and z-index * light mode fix * Update widgets.js --- web/scripts/widgets.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 238ad59dd..c0e73ffa1 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -136,9 +136,11 @@ function addMultilineWidget(node, name, opts, app) { left: `${t.a * margin + t.e}px`, top: `${t.d * (y + widgetHeight - margin - 3) + t.f}px`, width: `${(widgetWidth - margin * 2 - 3) * t.a}px`, + background: (!node.color)?'':node.color, height: `${(this.parent.inputHeight - margin * 2 - 4) * t.d}px`, position: "absolute", - zIndex: 1, + color: (!node.color)?'':'white', + zIndex: app.graph._nodes.indexOf(node), fontSize: `${t.d * 10.0}px`, }); this.inputEl.hidden = !visible; From 951c0c2bbe11e48956a7c619faf0c2cc6e3abff5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 26 Apr 2023 02:05:57 -0400 Subject: [PATCH 29/56] Don't keep cached outputs for removed nodes. 
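
Cached outputs are now pruned whenever their node id no longer appears in
the incoming prompt. A minimal sketch with hypothetical data:

    outputs = {"1": "a", "2": "b", "7": "stale"}
    prompt = {"1": {}, "2": {}}

    for o in [o for o in outputs if o not in prompt]:
        del outputs[o]  # node "7" was removed from the graph, drop its cache
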
--- execution.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/execution.py b/execution.py index 31a208e78..2c97e70d2 100644 --- a/execution.py +++ b/execution.py @@ -152,6 +152,15 @@ class PromptExecutor: self.server.client_id = None with torch.inference_mode(): + #delete cached outputs if nodes don't exist for them + to_delete = [] + for o in self.outputs: + if o not in prompt: + to_delete += [o] + for o in to_delete: + d = self.outputs.pop(o) + del d + for x in prompt: recursive_output_delete_if_changed(prompt, self.old_prompt, self.outputs, x) From 3a1f9dba20c89038b71d6ff74d4e600d375283b3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 26 Apr 2023 02:13:56 -0400 Subject: [PATCH 30/56] If IS_CHANGED returns exception delete the output instead of crashing. --- execution.py | 46 +++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/execution.py b/execution.py index 2c97e70d2..c19c10bc6 100644 --- a/execution.py +++ b/execution.py @@ -97,40 +97,44 @@ def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item is_changed_old = '' is_changed = '' + to_delete = False if hasattr(class_def, 'IS_CHANGED'): if unique_id in old_prompt and 'is_changed' in old_prompt[unique_id]: is_changed_old = old_prompt[unique_id]['is_changed'] if 'is_changed' not in prompt[unique_id]: input_data_all = get_input_data(inputs, class_def, unique_id, outputs) if input_data_all is not None: - is_changed = class_def.IS_CHANGED(**input_data_all) - prompt[unique_id]['is_changed'] = is_changed + try: + is_changed = class_def.IS_CHANGED(**input_data_all) + prompt[unique_id]['is_changed'] = is_changed + except: + to_delete = True else: is_changed = prompt[unique_id]['is_changed'] if unique_id not in outputs: return True - to_delete = False - if is_changed != is_changed_old: - to_delete = True - elif unique_id not in old_prompt: - to_delete = True - elif inputs == old_prompt[unique_id]['inputs']: - for x in inputs: - input_data = inputs[x] + if not to_delete: + if is_changed != is_changed_old: + to_delete = True + elif unique_id not in old_prompt: + to_delete = True + elif inputs == old_prompt[unique_id]['inputs']: + for x in inputs: + input_data = inputs[x] - if isinstance(input_data, list): - input_unique_id = input_data[0] - output_index = input_data[1] - if input_unique_id in outputs: - to_delete = recursive_output_delete_if_changed(prompt, old_prompt, outputs, input_unique_id) - else: - to_delete = True - if to_delete: - break - else: - to_delete = True + if isinstance(input_data, list): + input_unique_id = input_data[0] + output_index = input_data[1] + if input_unique_id in outputs: + to_delete = recursive_output_delete_if_changed(prompt, old_prompt, outputs, input_unique_id) + else: + to_delete = True + if to_delete: + break + else: + to_delete = True if to_delete: d = outputs.pop(unique_id) From 5a971cecdbacb849340f2ea7b3bcd80cc6032d1a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 27 Apr 2023 04:38:44 -0400 Subject: [PATCH 31/56] Add callback to sampler function. 
Callback format is: callback(step, x0, x) --- comfy/extra_samplers/uni_pc.py | 6 ++++-- comfy/sample.py | 4 ++-- comfy/samplers.py | 22 ++++++++++++++++------ 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index e96cfc93a..2952be62d 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -712,7 +712,7 @@ class UniPC: def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='time_uniform', method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', - atol=0.0078, rtol=0.05, corrector=False, + atol=0.0078, rtol=0.05, corrector=False, callback=None ): t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end t_T = self.noise_schedule.T if t_start is None else t_start @@ -766,6 +766,8 @@ class UniPC: if model_x is None: model_x = self.model_fn(x, vec_t) model_prev_list[-1] = model_x + if callback is not None: + callback(step_index, model_prev_list[-1], x) else: raise NotImplementedError() if denoise_to_zero: @@ -877,7 +879,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex order = min(3, len(timesteps) - 1) uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant) - x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True) + x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback) if not to_zero: x /= ns.marginal_alpha(timesteps[-1]) return x diff --git a/comfy/sample.py b/comfy/sample.py index 5e4d26142..f4132bbed 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -56,7 +56,7 @@ def cleanup_additional_models(models): for m in models: m.cleanup() -def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None): +def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None): device = comfy.model_management.get_torch_device() if noise_mask is not None: @@ -76,7 +76,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) - samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas) + samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback) samples = samples.cpu() cleanup_additional_models(models) diff --git a/comfy/samplers.py b/comfy/samplers.py index 26597ebba..fc19ddcfc 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -462,7 +462,7 @@ class KSampler: self.sigmas = sigmas[-(steps + 1):] - def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, 
force_full_denoise=False, denoise_mask=None, sigmas=None): + def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None): if sigmas is None: sigmas = self.sigmas sigma_min = self.sigma_min @@ -527,9 +527,9 @@ class KSampler: with precision_scope(model_management.get_autocast_device(self.device)): if self.sampler == "uni_pc": - samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask) + samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback) elif self.sampler == "uni_pc_bh2": - samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, variant='bh2') + samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2') elif self.sampler == "ddim": timesteps = [] for s in range(sigmas.shape[0]): @@ -537,6 +537,11 @@ class KSampler: noise_mask = None if denoise_mask is not None: noise_mask = 1.0 - denoise_mask + + ddim_callback = None + if callback is not None: + ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None) + sampler = DDIMSampler(self.model, device=self.device) sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False) z_enc = sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(self.device), noise=noise, max_denoise=max_denoise) @@ -550,6 +555,7 @@ class KSampler: eta=0.0, x_T=z_enc, x0=latent_image, + img_callback=ddim_callback, denoise_function=sampling_function, extra_args=extra_args, mask=noise_mask, @@ -563,13 +569,17 @@ class KSampler: noise = noise * sigmas[0] + k_callback = None + if callback is not None: + k_callback = lambda x: callback(x["i"], x["denoised"], x["x"]) + if latent_image is not None: noise += latent_image if self.sampler == "dpm_fast": - samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args=extra_args) + samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args=extra_args, callback=k_callback) elif self.sampler == "dpm_adaptive": - samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args) + samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback) else: - samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args) + samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback) return samples.to(torch.float32) From e958dfdd4d34ad160c50a32e01b5ce08c4e62a29 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 27 Apr 2023 10:59:47 -0400 Subject: [PATCH 32/56] Make notebook work on python3.7 --- notebooks/comfyui_colab.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebooks/comfyui_colab.ipynb 
b/notebooks/comfyui_colab.ipynb index c1982d8be..fecfa6707 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -47,7 +47,7 @@ " !git pull\n", "\n", "!echo -= Install dependencies =-\n", - "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118" + "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117" ] }, { From e214c917ae889b278a05fa6e8b8c42d2cc8818fa Mon Sep 17 00:00:00 2001 From: Jacob Segal Date: Tue, 25 Apr 2023 00:15:25 -0700 Subject: [PATCH 33/56] Add Condition by Mask node This PR adds support for a Condition by Mask node. This node allows conditioning to be limited to a non-rectangle area. --- comfy/samplers.py | 88 +++++++++++++++++++++++++++++++++++++++-------- nodes.py | 28 +++++++++++++++ 2 files changed, 101 insertions(+), 15 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index fc19ddcfc..6fa754b90 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -6,6 +6,7 @@ import contextlib from comfy import model_management from .ldm.models.diffusion.ddim import DDIMSampler from .ldm.modules.diffusionmodules.util import make_ddim_timesteps +from torchvision.ops import masks_to_boxes #The main sampling function shared by all the samplers #Returns predicted noise @@ -23,21 +24,34 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con adm_cond = cond[1]['adm_encoded'] input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - mult = torch.ones_like(input_x) * strength + if 'mask' in cond[1]: + # Scale the mask to the size of the input + # The mask should have been resized as we began the sampling process + mask = cond[1]['mask'] + assert(mask.shape[1] == x_in.shape[2]) + assert(mask.shape[2] == x_in.shape[3]) + mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] + if mask.shape[0] != input_x.shape[0]: + mask = mask.repeat(input_x.shape[0], 1, 1) + else: + mask = torch.ones_like(input_x) + mult = mask * strength + + if 'mask' not in cond[1]: + rr = 8 + if area[2] != 0: + for t in range(rr): + mult[:,:,t:1+t,:] *= ((1.0/rr) * (t + 1)) + if (area[0] + area[2]) < x_in.shape[2]: + for t in range(rr): + mult[:,:,area[0] - 1 - t:area[0] - t,:] *= ((1.0/rr) * (t + 1)) + if area[3] != 0: + for t in range(rr): + mult[:,:,:,t:1+t] *= ((1.0/rr) * (t + 1)) + if (area[1] + area[3]) < x_in.shape[3]: + for t in range(rr): + mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) - rr = 8 - if area[2] != 0: - for t in range(rr): - mult[:,:,t:1+t,:] *= ((1.0/rr) * (t + 1)) - if (area[0] + area[2]) < x_in.shape[2]: - for t in range(rr): - mult[:,:,area[0] - 1 - t:area[0] - t,:] *= ((1.0/rr) * (t + 1)) - if area[3] != 0: - for t in range(rr): - mult[:,:,:,t:1+t] *= ((1.0/rr) * (t + 1)) - if (area[1] + area[3]) < x_in.shape[3]: - for t in range(rr): - mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) conditionning = {} conditionning['c_crossattn'] = cond[0] if cond_concat_in is not None and len(cond_concat_in) > 0: @@ -301,6 +315,47 @@ def blank_inpaint_image_like(latent_image): blank_image[:,3] *= 0.1380 return blank_image +def resolve_cond_masks(conditions, h, w, device): + # We need to decide on an area outside the sampling loop in order to properly generate opposite areas of equal sizes. 
+ # While we're doing this, we can also resolve the mask device and scaling for performance reasons + for i in range(len(conditions)): + c = conditions[i] + if 'mask' in c[1]: + mask = c[1]['mask'] + mask = mask.to(device=device) + modified = c[1].copy() + if len(mask.shape) == 2: + mask = mask.unsqueeze(0) + if mask.shape[2] != h or mask.shape[3] != w: + mask = torch.nn.functional.interpolate(mask.unsqueeze(1), size=(h, w), mode='bilinear', align_corners=False).squeeze(1) + + if 'area' not in modified: + bounds = torch.max(torch.abs(mask),dim=0).values.unsqueeze(0) + if torch.max(bounds) == 0: + # Handle the edge-case of an all black mask (where masks_to_boxes would error) + area = (0, 0, 0, 0) + else: + box = masks_to_boxes(bounds)[0].type(torch.int) + H, W, Y, X = (box[3] - box[1] + 1, box[2] - box[0] + 1, box[1], box[0]) + # Make sure the height and width are divisible by 8 + if X % 8 != 0: + newx = X // 8 * 8 + W = W + (X - newx) + X = newx + if Y % 8 != 0: + newy = Y // 8 * 8 + H = H + (Y - newy) + Y = newy + if H % 8 != 0: + H = H + (8 - (H % 8)) + if W % 8 != 0: + W = W + (8 - (W % 8)) + area = (int(H), int(W), int(Y), (X)) + modified['area'] = area + + modified['mask'] = mask + conditions[i] = [c[0], modified] + def create_cond_with_same_area_if_none(conds, c): if 'area' not in c[1]: return @@ -461,7 +516,6 @@ class KSampler: sigmas = self.calculate_sigmas(new_steps).to(self.device) self.sigmas = sigmas[-(steps + 1):] - def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None): if sigmas is None: sigmas = self.sigmas @@ -484,6 +538,10 @@ class KSampler: positive = positive[:] negative = negative[:] + + resolve_cond_masks(positive, noise.shape[2], noise.shape[3], self.device) + resolve_cond_masks(negative, noise.shape[2], noise.shape[3], self.device) + #make sure each cond area has an opposite one with the same area for c in positive: create_cond_with_same_area_if_none(negative, c) diff --git a/nodes.py b/nodes.py index 0a9513bed..be02f4676 100644 --- a/nodes.py +++ b/nodes.py @@ -85,6 +85,32 @@ class ConditioningSetArea: c.append(n) return (c, ) +class ConditioningSetMask: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning": ("CONDITIONING", ), + "mask": ("MASK", ), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "append" + + CATEGORY = "conditioning" + + def append(self, conditioning, mask, strength, min_sigma=0.0, max_sigma=99.0): + c = [] + if len(mask.shape) < 3: + mask = mask.unsqueeze(0) + for t in conditioning: + n = [t[0], t[1].copy()] + _, h, w = mask.shape + n[1]['mask'] = mask + n[1]['strength'] = strength + n[1]['min_sigma'] = min_sigma + n[1]['max_sigma'] = max_sigma + c.append(n) + return (c, ) + class VAEDecode: def __init__(self, device="cpu"): self.device = device @@ -1115,6 +1141,7 @@ NODE_CLASS_MAPPINGS = { "ImagePadForOutpaint": ImagePadForOutpaint, "ConditioningCombine": ConditioningCombine, "ConditioningSetArea": ConditioningSetArea, + "ConditioningSetMask": ConditioningSetMask, "KSamplerAdvanced": KSamplerAdvanced, "SetLatentNoiseMask": SetLatentNoiseMask, "LatentComposite": LatentComposite, @@ -1164,6 +1191,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "CLIPSetLastLayer": "CLIP Set Last Layer", "ConditioningCombine": "Conditioning (Combine)", "ConditioningSetArea": "Conditioning (Set Area)", + "ConditioningSetMask": "Conditioning (Set Mask)", 
"ControlNetApply": "Apply ControlNet", # Latent "VAEEncodeForInpaint": "VAE Encode (for Inpainting)", From 27bf9392ac1ef07776d31895b748c7ea84969115 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 28 Apr 2023 08:35:20 -0400 Subject: [PATCH 34/56] Switch stable standalone dependencies to stable xformers. Switch nightly standalone to cu121. --- .github/workflows/windows_release_cu118_dependencies_2.yml | 2 +- .github/workflows/windows_release_nightly_pytorch.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/windows_release_cu118_dependencies_2.yml b/.github/workflows/windows_release_cu118_dependencies_2.yml index a88449527..42adee9e7 100644 --- a/.github/workflows/windows_release_cu118_dependencies_2.yml +++ b/.github/workflows/windows_release_cu118_dependencies_2.yml @@ -17,7 +17,7 @@ jobs: - shell: bash run: | - python -m pip wheel --no-cache-dir torch torchvision torchaudio xformers==0.0.19.dev516 --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir + python -m pip wheel --no-cache-dir torch torchvision torchaudio xformers --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir python -m pip install --no-cache-dir ./temp_wheel_dir/* echo installed basic ls -lah temp_wheel_dir diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 291d754e3..32d2f320b 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -30,7 +30,7 @@ jobs: echo 'import site' >> ./python310._pth curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py - python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir + python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir ls ../temp_wheel_dir ./python.exe -s -m pip install --pre ../temp_wheel_dir/* sed -i '1i../ComfyUI' ./python310._pth From e543ecad6991fc7e71dd2042b439aefb9c0722de Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 28 Apr 2023 08:50:12 -0400 Subject: [PATCH 35/56] Fix the nightly build not being packaged correctly. 
--- .ci/nightly/update_windows/update.py | 65 ------------------- .ci/nightly/update_windows/update_comfyui.bat | 2 - ...update_comfyui_and_python_dependencies.bat | 2 +- .../README_VERY_IMPORTANT.txt | 27 -------- .ci/nightly/windows_base_files/run_cpu.bat | 2 - .../windows_release_nightly_pytorch.yml | 2 + 6 files changed, 3 insertions(+), 97 deletions(-) delete mode 100755 .ci/nightly/update_windows/update.py delete mode 100755 .ci/nightly/update_windows/update_comfyui.bat delete mode 100755 .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt delete mode 100755 .ci/nightly/windows_base_files/run_cpu.bat diff --git a/.ci/nightly/update_windows/update.py b/.ci/nightly/update_windows/update.py deleted file mode 100755 index c09f29a80..000000000 --- a/.ci/nightly/update_windows/update.py +++ /dev/null @@ -1,65 +0,0 @@ -import pygit2 -from datetime import datetime -import sys - -def pull(repo, remote_name='origin', branch='master'): - for remote in repo.remotes: - if remote.name == remote_name: - remote.fetch() - remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target - merge_result, _ = repo.merge_analysis(remote_master_id) - # Up to date, do nothing - if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE: - return - # We can just fastforward - elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD: - repo.checkout_tree(repo.get(remote_master_id)) - try: - master_ref = repo.lookup_reference('refs/heads/%s' % (branch)) - master_ref.set_target(remote_master_id) - except KeyError: - repo.create_branch(branch, repo.get(remote_master_id)) - repo.head.set_target(remote_master_id) - elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL: - repo.merge(remote_master_id) - - if repo.index.conflicts is not None: - for conflict in repo.index.conflicts: - print('Conflicts found in:', conflict[0].path) - raise AssertionError('Conflicts, ahhhhh!!') - - user = repo.default_signature - tree = repo.index.write_tree() - commit = repo.create_commit('HEAD', - user, - user, - 'Merge!', - tree, - [repo.head.target, remote_master_id]) - # We need to do this or git CLI will think we are still merging. 
- repo.state_cleanup() - else: - raise AssertionError('Unknown merge analysis result') - - -repo = pygit2.Repository(str(sys.argv[1])) -ident = pygit2.Signature('comfyui', 'comfy@ui') -try: - print("stashing current changes") - repo.stash(ident) -except KeyError: - print("nothing to stash") -backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S')) -print("creating backup branch: {}".format(backup_branch_name)) -repo.branches.local.create(backup_branch_name, repo.head.peel()) - -print("checking out master branch") -branch = repo.lookup_branch('master') -ref = repo.lookup_reference(branch.name) -repo.checkout(ref) - -print("pulling latest changes") -pull(repo) - -print("Done!") - diff --git a/.ci/nightly/update_windows/update_comfyui.bat b/.ci/nightly/update_windows/update_comfyui.bat deleted file mode 100755 index 60d1e694f..000000000 --- a/.ci/nightly/update_windows/update_comfyui.bat +++ /dev/null @@ -1,2 +0,0 @@ -..\python_embeded\python.exe .\update.py ..\ComfyUI\ -pause diff --git a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat index c5e0c6be7..c345a6992 100755 --- a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat +++ b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat @@ -1,3 +1,3 @@ ..\python_embeded\python.exe .\update.py ..\ComfyUI\ -..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2 +..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu121 -r ../ComfyUI/requirements.txt pygit2 pause diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt deleted file mode 100755 index 656b9db43..000000000 --- a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt +++ /dev/null @@ -1,27 +0,0 @@ -HOW TO RUN: - -if you have a NVIDIA gpu: - -run_nvidia_gpu.bat - - - -To run it in slow CPU mode: - -run_cpu.bat - - - -IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints - -You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt - - - -RECOMMENDED WAY TO UPDATE: -To update the ComfyUI code: update\update_comfyui.bat - - - -To update ComfyUI with the python dependencies: -update\update_comfyui_and_python_dependencies.bat diff --git a/.ci/nightly/windows_base_files/run_cpu.bat b/.ci/nightly/windows_base_files/run_cpu.bat deleted file mode 100755 index c3ba41721..000000000 --- a/.ci/nightly/windows_base_files/run_cpu.bat +++ /dev/null @@ -1,2 +0,0 @@ -.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build -pause diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 32d2f320b..4d686ded8 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -46,6 +46,8 @@ jobs: mkdir update cp -r ComfyUI/.ci/update_windows/* ./update/ cp -r ComfyUI/.ci/windows_base_files/* ./ + cp -r ComfyUI/.ci/nightly/update_windows/* ./update/ + cp -r ComfyUI/.ci/nightly/windows_base_files/* ./ cd .. 
From ab9a9deff48b5780bd105dfd6d19f5f8333ef608 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 28 Apr 2023 09:03:39 -0400 Subject: [PATCH 36/56] Fix nightly CI builds. No cu121 builds for windows yet. --- .../update_windows/update_comfyui_and_python_dependencies.bat | 2 +- .github/workflows/windows_release_nightly_pytorch.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat index c345a6992..b4989534f 100755 --- a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat +++ b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat @@ -1,3 +1,3 @@ ..\python_embeded\python.exe .\update.py ..\ComfyUI\ -..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu121 -r ../ComfyUI/requirements.txt pygit2 +..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 pause diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 4d686ded8..f23cae6d5 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -30,7 +30,7 @@ jobs: echo 'import site' >> ./python310._pth curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py - python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir + python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir ls ../temp_wheel_dir ./python.exe -s -m pip install --pre ../temp_wheel_dir/* sed -i '1i../ComfyUI' ./python310._pth From 3baded9892a6ac02f57caaf68053791ec0e14c5a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 28 Apr 2023 14:28:57 -0400 Subject: [PATCH 37/56] Basic torch_directml support. Use --directml to use it. --- comfy/cli_args.py | 1 + comfy/model_management.py | 27 ++++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index b24054ce0..05b9c5e08 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -10,6 +10,7 @@ parser.add_argument("--output-directory", type=str, default=None, help="Set the parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") parser.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).") +parser.add_argument("--directml", action="store_true", help="Use torch-directml.") attn_group = parser.add_mutually_exclusive_group() attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. 
Ignored when xformers is used.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 6e3a03530..339111c4d 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -20,6 +20,13 @@ total_vram_available_mb = -1 accelerate_enabled = False xpu_available = False +directml_enabled = False +if args.directml: + import torch_directml + print("Using directml") + directml_enabled = True + # torch_directml.disable_tiled_resources(True) + try: import torch try: @@ -217,6 +224,9 @@ def unload_if_low_vram(model): def get_torch_device(): global xpu_available + global directml_enabled + if directml_enabled: + return torch_directml.device() if vram_state == VRAMState.MPS: return torch.device("mps") if vram_state == VRAMState.CPU: @@ -234,8 +244,14 @@ def get_autocast_device(dev): def xformers_enabled(): + global xpu_available + global directml_enabled if vram_state == VRAMState.CPU: return False + if xpu_available: + return False + if directml_enabled: + return False return XFORMERS_IS_AVAILABLE @@ -251,6 +267,7 @@ def pytorch_attention_enabled(): def get_free_memory(dev=None, torch_free_too=False): global xpu_available + global directml_enabled if dev is None: dev = get_torch_device() @@ -258,7 +275,10 @@ def get_free_memory(dev=None, torch_free_too=False): mem_free_total = psutil.virtual_memory().available mem_free_torch = mem_free_total else: - if xpu_available: + if directml_enabled: + mem_free_total = 1024 * 1024 * 1024 #TODO + mem_free_torch = mem_free_total + elif xpu_available: mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) mem_free_torch = mem_free_total else: @@ -293,9 +313,14 @@ def mps_mode(): def should_use_fp16(): global xpu_available + global directml_enabled + if FORCE_FP32: return False + if directml_enabled: + return False + if cpu_mode() or mps_mode() or xpu_available: return False #TODO ? From 0306371e54ddb7472622eb43ed2180a109be6e6b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 28 Apr 2023 16:18:54 -0400 Subject: [PATCH 38/56] Add "Installing" link to top of readme. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 5b6346a67..00b228497 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ A powerful and modular stable diffusion GUI and backend. This ui will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. For some workflow examples and see what ComfyUI can do you can check out: ### [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/) +### [Installing](#installing) + ## Features - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything. - Fully supports SD1.x and SD2.x From cab80973d187903d9c415cfcc2575e4616befaa8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 28 Apr 2023 16:19:56 -0400 Subject: [PATCH 39/56] Fix Readme. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 00b228497..3b3824714 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ A powerful and modular stable diffusion GUI and backend. This ui will let you design and execute advanced stable diffusion pipelines using a graph/nodes/flowchart based interface. 
For some workflow examples and see what ComfyUI can do you can check out: ### [ComfyUI Examples](https://comfyanonymous.github.io/ComfyUI_examples/) -### [Installing](#installing) +### [Installing ComfyUI](#installing) ## Features - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything. From 2ca934f7d4df3e4fa5a74172e5bbc1dd5e1a2ff9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 28 Apr 2023 16:51:35 -0400 Subject: [PATCH 40/56] You can now select the device index with: --directml id Like this for example: --directml 1 --- comfy/cli_args.py | 2 +- comfy/model_management.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 05b9c5e08..764427165 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -10,7 +10,7 @@ parser.add_argument("--output-directory", type=str, default=None, help="Set the parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") parser.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).") -parser.add_argument("--directml", action="store_true", help="Use torch-directml.") +parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.") attn_group = parser.add_mutually_exclusive_group() attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 339111c4d..9497ae7af 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -21,10 +21,15 @@ accelerate_enabled = False xpu_available = False directml_enabled = False -if args.directml: +if args.directml is not None: import torch_directml - print("Using directml") directml_enabled = True + device_index = args.directml + if device_index < 0: + directml_device = torch_directml.device() + else: + directml_device = torch_directml.device(device_index) + print("Using directml with device:", torch_directml.device_name(device_index)) # torch_directml.disable_tiled_resources(True) try: @@ -226,7 +231,8 @@ def get_torch_device(): global xpu_available global directml_enabled if directml_enabled: - return torch_directml.device() + global directml_device + return directml_device if vram_state == VRAMState.MPS: return torch.device("mps") if vram_state == VRAMState.CPU: From 056e5545ffafc7c396cd18d0737a9d5e40f81552 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 29 Apr 2023 00:28:48 -0400 Subject: [PATCH 41/56] Don't try to get vram from xpu or cuda when directml is enabled. 
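
torch_directml exposes no memory introspection through torch.cuda or
torch.xpu, so probing either of them on a DirectML device throws at startup.
The VRAM probe therefore has to branch before touching them. A minimal sketch
of the detection order this patch establishes (the 4097 MB figure is the
patch's placeholder, not a real query):

    import torch

    def detect_total_vram_mb(directml_enabled, xpu_available):
        if directml_enabled:
            return 4097  # TODO placeholder: DirectML cannot report memory yet
        if xpu_available:  # assumes intel_extension_for_pytorch imported fine
            props = torch.xpu.get_device_properties(torch.xpu.current_device())
            return props.total_memory / (1024 * 1024)
        # torch.cuda.mem_get_info returns (free, total) in bytes; keep the total
        return torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)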
--- comfy/model_management.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 9497ae7af..db5d368e1 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -34,13 +34,16 @@ if args.directml is not None: try: import torch - try: - import intel_extension_for_pytorch as ipex - if torch.xpu.is_available(): - xpu_available = True - total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) - except: - total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) + if directml_enabled: + total_vram = 4097 #TODO + else: + try: + import intel_extension_for_pytorch as ipex + if torch.xpu.is_available(): + xpu_available = True + total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) + except: + total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) if not args.normalvram and not args.cpu: if total_vram <= 4096: From af02393c2a7134861df57e5843fc17498c65a795 Mon Sep 17 00:00:00 2001 From: Jacob Segal Date: Sat, 29 Apr 2023 00:16:58 -0700 Subject: [PATCH 42/56] Default to sampling entire image By default, when applying a mask to a condition, the entire image will still be used for sampling. The new "set_area_to_bounds" option on the node will allow the user to automatically limit conditioning to the bounds of the mask. I've also removed the dependency on torchvision for calculating bounding boxes. I've taken the opportunity to fix some frustrating details in the other version: 1. An all-0 mask will no longer cause an error 2. Indices are returned as integers instead of floats so they can be used to index into tensors. 
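
A quick sanity check of the new bounding-box helper (a hedged sketch, not part
of the patch; the shapes are made up):

    import torch
    from comfy.samplers import get_mask_aabb

    masks = torch.zeros((2, 64, 64))
    masks[0, 10:20, 30:40] = 1.0       # second mask stays all-zero on purpose
    boxes, is_empty = get_mask_aabb(masks)
    # boxes[0]  -> tensor([30, 10, 39, 19]), integer (x0, y0, x1, y1)
    # is_empty  -> tensor([False,  True]),   all-zero masks no longer error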
--- comfy/samplers.py | 42 ++++++++++++++++++++++++++++++++---------- nodes.py | 4 +++- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 6fa754b90..f8701c879 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -6,7 +6,6 @@ import contextlib from comfy import model_management from .ldm.models.diffusion.ddim import DDIMSampler from .ldm.modules.diffusionmodules.util import make_ddim_timesteps -from torchvision.ops import masks_to_boxes #The main sampling function shared by all the samplers #Returns predicted noise @@ -31,8 +30,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con assert(mask.shape[1] == x_in.shape[2]) assert(mask.shape[2] == x_in.shape[3]) mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - if mask.shape[0] != input_x.shape[0]: - mask = mask.repeat(input_x.shape[0], 1, 1) + mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1) else: mask = torch.ones_like(input_x) mult = mask * strength @@ -315,6 +313,29 @@ def blank_inpaint_image_like(latent_image): blank_image[:,3] *= 0.1380 return blank_image +def get_mask_aabb(masks): + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device, dtype=torch.int) + + b = masks.shape[0] + + bounding_boxes = torch.zeros((b, 4), device=masks.device, dtype=torch.int) + is_empty = torch.zeros((b), device=masks.device, dtype=torch.bool) + for i in range(b): + mask = masks[i] + if mask.numel() == 0: + continue + if torch.max(mask != 0) == False: + is_empty[i] = True + continue + y, x = torch.where(mask) + bounding_boxes[i, 0] = torch.min(x) + bounding_boxes[i, 1] = torch.min(y) + bounding_boxes[i, 2] = torch.max(x) + bounding_boxes[i, 3] = torch.max(y) + + return bounding_boxes, is_empty + def resolve_cond_masks(conditions, h, w, device): # We need to decide on an area outside the sampling loop in order to properly generate opposite areas of equal sizes. # While we're doing this, we can also resolve the mask device and scaling for performance reasons @@ -329,13 +350,14 @@ def resolve_cond_masks(conditions, h, w, device): if mask.shape[2] != h or mask.shape[3] != w: mask = torch.nn.functional.interpolate(mask.unsqueeze(1), size=(h, w), mode='bilinear', align_corners=False).squeeze(1) - if 'area' not in modified: + if modified.get("set_area_to_bounds", False): bounds = torch.max(torch.abs(mask),dim=0).values.unsqueeze(0) - if torch.max(bounds) == 0: - # Handle the edge-case of an all black mask (where masks_to_boxes would error) - area = (0, 0, 0, 0) + boxes, is_empty = get_mask_aabb(bounds) + if is_empty[0]: + # Use the minimum possible size for efficiency reasons. 
(Since the mask is all-0, this becomes a noop anyway) + modified['area'] = (8, 8, 0, 0) else: - box = masks_to_boxes(bounds)[0].type(torch.int) + box = boxes[0] H, W, Y, X = (box[3] - box[1] + 1, box[2] - box[0] + 1, box[1], box[0]) # Make sure the height and width are divisible by 8 if X % 8 != 0: @@ -350,8 +372,8 @@ def resolve_cond_masks(conditions, h, w, device): H = H + (8 - (H % 8)) if W % 8 != 0: W = W + (8 - (W % 8)) - area = (int(H), int(W), int(Y), (X)) - modified['area'] = area + area = (int(H), int(W), int(Y), int(X)) + modified['area'] = area modified['mask'] = mask conditions[i] = [c[0], modified] diff --git a/nodes.py b/nodes.py index be02f4676..12fa7e5a3 100644 --- a/nodes.py +++ b/nodes.py @@ -90,6 +90,7 @@ class ConditioningSetMask: def INPUT_TYPES(s): return {"required": {"conditioning": ("CONDITIONING", ), "mask": ("MASK", ), + "set_area_to_bounds": ([False, True],), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), }} RETURN_TYPES = ("CONDITIONING",) @@ -97,7 +98,7 @@ class ConditioningSetMask: CATEGORY = "conditioning" - def append(self, conditioning, mask, strength, min_sigma=0.0, max_sigma=99.0): + def append(self, conditioning, mask, set_area_to_bounds, strength, min_sigma=0.0, max_sigma=99.0): c = [] if len(mask.shape) < 3: mask = mask.unsqueeze(0) @@ -105,6 +106,7 @@ class ConditioningSetMask: n = [t[0], t[1].copy()] _, h, w = mask.shape n[1]['mask'] = mask + n[1]['set_area_to_bounds'] = set_area_to_bounds n[1]['strength'] = strength n[1]['min_sigma'] = min_sigma n[1]['max_sigma'] = max_sigma From ffd0f9f417d94bce03ea863131df9e6a86a89ada Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 29 Apr 2023 17:19:14 +0100 Subject: [PATCH 43/56] Search filter by type --- web/extensions/core/slotDefaults.js | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/web/extensions/core/slotDefaults.js b/web/extensions/core/slotDefaults.js index 3ec605900..9401678b0 100644 --- a/web/extensions/core/slotDefaults.js +++ b/web/extensions/core/slotDefaults.js @@ -6,6 +6,7 @@ app.registerExtension({ name: "Comfy.SlotDefaults", suggestionsNumber: null, init() { + LiteGraph.search_filter_enabled = true; LiteGraph.middle_click_slot_add_default_node = true; this.suggestionsNumber = app.ui.settings.addSetting({ id: "Comfy.NodeSuggestions.number", @@ -43,6 +44,14 @@ app.registerExtension({ } if (this.slot_types_default_out[type].includes(nodeId)) continue; this.slot_types_default_out[type].push(nodeId); + + // Input types have to be stored as lower case + // Store each node that can handle this input type + const lowerType = type.toLocaleLowerCase(); + if (!(lowerType in LiteGraph.registered_slot_in_types)) { + LiteGraph.registered_slot_in_types[lowerType] = { nodes: [] }; + } + LiteGraph.registered_slot_in_types[lowerType].nodes.push(nodeType.comfyClass); } var outputs = nodeData["output"]; @@ -53,6 +62,16 @@ app.registerExtension({ } this.slot_types_default_in[type].push(nodeId); + + // Store each node that can handle this output type + if (!(type in LiteGraph.registered_slot_out_types)) { + LiteGraph.registered_slot_out_types[type] = { nodes: [] }; + } + LiteGraph.registered_slot_out_types[type].nodes.push(nodeType.comfyClass); + + if(!LiteGraph.slot_types_out.includes(type)) { + LiteGraph.slot_types_out.push(type); + } } var maxNum = this.suggestionsNumber.value; this.setDefaults(maxNum); From 15a4c0db3b11c75350268950d8d0da175e72440d Mon Sep 17 00:00:00 2001 From: pythongosssss 
<125205205+pythongosssss@users.noreply.github.com> Date: Sat, 29 Apr 2023 17:29:07 +0100 Subject: [PATCH 44/56] - button hover style - ensure context menu is always above everything --- web/style.css | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/web/style.css b/web/style.css index 2cbf02c0c..eced33d29 100644 --- a/web/style.css +++ b/web/style.css @@ -120,7 +120,7 @@ body { .comfy-menu > button, .comfy-menu-btns button, .comfy-menu .comfy-list button, -.comfy-modal button{ +.comfy-modal button { color: var(--input-text); background-color: var(--comfy-input-bg); border-radius: 8px; @@ -129,6 +129,15 @@ body { margin-top: 2px; } +.comfy-menu > button:hover, +.comfy-menu-btns button:hover, +.comfy-menu .comfy-list button:hover, +.comfy-modal button:hover, +.comfy-settings-btn:hover { + filter: brightness(1.2); + cursor: pointer; +} + .comfy-menu span.drag-handle { width: 10px; height: 20px; @@ -284,4 +293,7 @@ button.comfy-queue-btn { top: 0; right: 2px; } - \ No newline at end of file + + .litecontextmenu { + z-index: 9999 !important; +} \ No newline at end of file From 071011aebed2b636865dacacf6213d6714d6d80c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 29 Apr 2023 20:06:53 -0400 Subject: [PATCH 45/56] Mask strength should be separate from area strength. --- comfy/samplers.py | 5 ++++- nodes.py | 6 ++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index f8701c879..10527fb1c 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -26,10 +26,13 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con if 'mask' in cond[1]: # Scale the mask to the size of the input # The mask should have been resized as we began the sampling process + mask_strength = 1.0 + if "mask_strength" in cond[1]: + mask_strength = cond[1]["mask_strength"] mask = cond[1]['mask'] assert(mask.shape[1] == x_in.shape[2]) assert(mask.shape[2] == x_in.shape[3]) - mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] + mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1) else: mask = torch.ones_like(input_x) diff --git a/nodes.py b/nodes.py index 12fa7e5a3..b4069c836 100644 --- a/nodes.py +++ b/nodes.py @@ -98,7 +98,7 @@ class ConditioningSetMask: CATEGORY = "conditioning" - def append(self, conditioning, mask, set_area_to_bounds, strength, min_sigma=0.0, max_sigma=99.0): + def append(self, conditioning, mask, set_area_to_bounds, strength): c = [] if len(mask.shape) < 3: mask = mask.unsqueeze(0) @@ -107,9 +107,7 @@ class ConditioningSetMask: _, h, w = mask.shape n[1]['mask'] = mask n[1]['set_area_to_bounds'] = set_area_to_bounds - n[1]['strength'] = strength - n[1]['min_sigma'] = min_sigma - n[1]['max_sigma'] = max_sigma + n[1]['mask_strength'] = strength c.append(n) return (c, ) From c66db067630c57ec037b906b6b3f766d1153522b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 29 Apr 2023 20:19:14 -0400 Subject: [PATCH 46/56] Make ConditioningSetMask area option a bit more clear. Make ConditioningSetArea override the set_area_to_bounds. 
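
The node now takes a "set_cond_area" combo ("default" / "mask bounds") instead
of a raw boolean, which reads better in the UI. Calling the node class
directly, as an illustrative sketch with dummy tensors:

    import torch
    from nodes import ConditioningSetMask

    cond = [[torch.zeros(1, 77, 768), {}]]   # one dummy conditioning entry
    mask = torch.zeros(64, 64)
    mask[8:24, 8:24] = 1.0
    (out,) = ConditioningSetMask().append(cond, mask,
                                          set_cond_area="mask bounds",
                                          strength=1.0)
    # out[0][1] now carries 'mask', 'mask_strength' and
    # 'set_area_to_bounds': True, so sampling crops to the mask's bounding box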
--- nodes.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index b4069c836..c9d660738 100644 --- a/nodes.py +++ b/nodes.py @@ -80,6 +80,7 @@ class ConditioningSetArea: n = [t[0], t[1].copy()] n[1]['area'] = (height // 8, width // 8, y // 8, x // 8) n[1]['strength'] = strength + n[1]['set_area_to_bounds'] = False n[1]['min_sigma'] = min_sigma n[1]['max_sigma'] = max_sigma c.append(n) @@ -90,16 +91,19 @@ class ConditioningSetMask: def INPUT_TYPES(s): return {"required": {"conditioning": ("CONDITIONING", ), "mask": ("MASK", ), - "set_area_to_bounds": ([False, True],), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), }} RETURN_TYPES = ("CONDITIONING",) FUNCTION = "append" CATEGORY = "conditioning" - def append(self, conditioning, mask, set_area_to_bounds, strength): + def append(self, conditioning, mask, set_cond_area, strength): c = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True if len(mask.shape) < 3: mask = mask.unsqueeze(0) for t in conditioning: From 4cea9aecdab6bbd7b5801c64c27368ee3203a9ad Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 29 Apr 2023 20:53:03 -0400 Subject: [PATCH 47/56] Make nodes easier to resize. --- web/lib/litegraph.core.js | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 20ec35476..d471c0f50 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -5880,10 +5880,10 @@ LGraphNode.prototype.executeAction = function(action) node.resizable !== false && isInsideRectangle( e.canvasX, e.canvasY, - node.pos[0] + node.size[0] - 5, - node.pos[1] + node.size[1] - 5, - 10, - 10 + node.pos[0] + node.size[0] - 15, + node.pos[1] + node.size[1] - 15, + 20, + 20 ) ) { this.graph.beforeChange(); @@ -6428,10 +6428,10 @@ LGraphNode.prototype.executeAction = function(action) isInsideRectangle( e.canvasX, e.canvasY, - node.pos[0] + node.size[0] - 5, - node.pos[1] + node.size[1] - 5, - 5, - 5 + node.pos[0] + node.size[0] - 15, + node.pos[1] + node.size[1] - 15, + 15, + 15 ) ) { this.canvas.style.cursor = "se-resize"; From a2e18b15046456c86b0d550d515c737f976d03d6 Mon Sep 17 00:00:00 2001 From: BlenderNeko <126974546+BlenderNeko@users.noreply.github.com> Date: Sun, 30 Apr 2023 18:59:58 +0200 Subject: [PATCH 48/56] allow disabling of progress bar when sampling --- comfy/samplers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 10527fb1c..1b486f803 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -541,7 +541,7 @@ class KSampler: sigmas = self.calculate_sigmas(new_steps).to(self.device) self.sigmas = sigmas[-(steps + 1):] - def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None): + def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None, disable_pbar=False): if sigmas is None: sigmas = self.sigmas sigma_min = self.sigma_min @@ -610,9 +610,9 @@ class KSampler: with precision_scope(model_management.get_autocast_device(self.device)): if self.sampler == "uni_pc": - samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, 
max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback) + samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) elif self.sampler == "uni_pc_bh2": - samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2') + samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) elif self.sampler == "ddim": timesteps = [] for s in range(sigmas.shape[0]): @@ -659,10 +659,10 @@ class KSampler: if latent_image is not None: noise += latent_image if self.sampler == "dpm_fast": - samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args=extra_args, callback=k_callback) + samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], self.steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar) elif self.sampler == "dpm_adaptive": - samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback) + samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar) else: - samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback) + samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar) return samples.to(torch.float32) From 20123624933cd559dc903f0b7c97566113018a1b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 30 Apr 2023 13:02:07 -0400 Subject: [PATCH 49/56] Adjust node resize area depending on outputs. --- web/lib/litegraph.core.js | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index d471c0f50..2bc6af0c3 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -3628,6 +3628,18 @@ return size; }; + LGraphNode.prototype.inResizeCorner = function(canvasX, canvasY) { + var rows = this.outputs ? this.outputs.length : 1; + var outputs_offset = (this.constructor.slot_start_y || 0) + rows * LiteGraph.NODE_SLOT_HEIGHT; + return isInsideRectangle(canvasX, + canvasY, + this.pos[0] + this.size[0] - 15, + this.pos[1] + Math.max(this.size[1] - 15, outputs_offset), + 20, + 20 + ); + } + /** * returns all the info available about a property of this node. 
* @@ -5877,14 +5889,7 @@ LGraphNode.prototype.executeAction = function(action) if ( !this.connecting_node && !node.flags.collapsed && !this.live_mode ) { //Search for corner for resize if ( !skip_action && - node.resizable !== false && - isInsideRectangle( e.canvasX, - e.canvasY, - node.pos[0] + node.size[0] - 15, - node.pos[1] + node.size[1] - 15, - 20, - 20 - ) + node.resizable !== false && node.inResizeCorner(e.canvasX, e.canvasY) ) { this.graph.beforeChange(); this.resizing_node = node; @@ -6424,16 +6429,7 @@ LGraphNode.prototype.executeAction = function(action) //Search for corner if (this.canvas) { - if ( - isInsideRectangle( - e.canvasX, - e.canvasY, - node.pos[0] + node.size[0] - 15, - node.pos[1] + node.size[1] - 15, - 15, - 15 - ) - ) { + if (node.inResizeCorner(e.canvasX, e.canvasY)) { this.canvas.style.cursor = "se-resize"; } else { this.canvas.style.cursor = "crosshair"; From 29c8f1a3442aad7d615430f8484b85de995c141c Mon Sep 17 00:00:00 2001 From: FizzleDorf <1fizzledorf@gmail.com> Date: Sun, 30 Apr 2023 17:33:15 -0400 Subject: [PATCH 50/56] Conditioning Average (#495) * first commit * fixed a bunch of things missing in initial commit. * parameters renamed for clarity * renamed node, attempted update cond list * to_strength removed, it is now normalized * removed comments and prints. Attempted to apply to every cond in list again but no luck * fixed repeating frames after batch using deepcopy * Revert "fixed repeating frames after batch using deepcopy" This reverts commit 1086d6a0e1f5c5c9247312872402ff8e60358fe1. * Rewrite addWeighted to use torch.mul iteratively. --------- Co-authored-by: City <125218114+city96@users.noreply.github.com> --- nodes.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/nodes.py b/nodes.py index c9d660738..fc3d2f183 100644 --- a/nodes.py +++ b/nodes.py @@ -59,6 +59,27 @@ class ConditioningCombine: def combine(self, conditioning_1, conditioning_2): return (conditioning_1 + conditioning_2, ) +class ConditioningAverage : + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning_from": ("CONDITIONING", ), "conditioning_to": ("CONDITIONING", ), + "conditioning_from_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}) + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "addWeighted" + + CATEGORY = "conditioning" + + def addWeighted(self, conditioning_from, conditioning_to, conditioning_from_strength): + out = [] + for i in range(min(len(conditioning_from),len(conditioning_to))): + t0 = conditioning_from[i] + t1 = conditioning_to[i] + tw = torch.mul(t0[0],(1-conditioning_from_strength)) + torch.mul(t1[0],conditioning_from_strength) + n = [tw, t0[1].copy()] + out.append(n) + return (out, ) + class ConditioningSetArea: @classmethod def INPUT_TYPES(s): @@ -1143,6 +1164,7 @@ NODE_CLASS_MAPPINGS = { "ImageScale": ImageScale, "ImageInvert": ImageInvert, "ImagePadForOutpaint": ImagePadForOutpaint, + "ConditioningAverage ": ConditioningAverage , "ConditioningCombine": ConditioningCombine, "ConditioningSetArea": ConditioningSetArea, "ConditioningSetMask": ConditioningSetMask, @@ -1194,6 +1216,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "CLIPTextEncode": "CLIP Text Encode (Prompt)", "CLIPSetLastLayer": "CLIP Set Last Layer", "ConditioningCombine": "Conditioning (Combine)", + "ConditioningAverage ": "Conditioning (Average)", "ConditioningSetArea": "Conditioning (Set Area)", "ConditioningSetMask": "Conditioning (Set Mask)", "ControlNetApply": "Apply ControlNet", From 0aa667ed33aae800880153a91c283ac457d0b31c Mon 
Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 30 Apr 2023 17:28:55 -0400 Subject: [PATCH 51/56] Fix ConditioningAverage. --- nodes.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/nodes.py b/nodes.py index fc3d2f183..53e0f74bf 100644 --- a/nodes.py +++ b/nodes.py @@ -62,21 +62,30 @@ class ConditioningCombine: class ConditioningAverage : @classmethod def INPUT_TYPES(s): - return {"required": {"conditioning_from": ("CONDITIONING", ), "conditioning_to": ("CONDITIONING", ), - "conditioning_from_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.1}) + return {"required": {"conditioning_to": ("CONDITIONING", ), "conditioning_from": ("CONDITIONING", ), + "conditioning_to_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}) }} RETURN_TYPES = ("CONDITIONING",) FUNCTION = "addWeighted" CATEGORY = "conditioning" - def addWeighted(self, conditioning_from, conditioning_to, conditioning_from_strength): + def addWeighted(self, conditioning_to, conditioning_from, conditioning_to_strength): out = [] - for i in range(min(len(conditioning_from),len(conditioning_to))): - t0 = conditioning_from[i] - t1 = conditioning_to[i] - tw = torch.mul(t0[0],(1-conditioning_from_strength)) + torch.mul(t1[0],conditioning_from_strength) - n = [tw, t0[1].copy()] + + if len(conditioning_from) > 1: + print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") + + cond_from = conditioning_from[0][0] + + for i in range(len(conditioning_to)): + t1 = conditioning_to[i][0] + t0 = cond_from[:,:t1.shape[1]] + if t0.shape[1] < t1.shape[1]: + t0 = torch.cat([t0] + [torch.zeros((1, (t1.shape[1] - t0.shape[1]), t1.shape[2]))], dim=1) + + tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength)) + n = [tw, conditioning_to[i][1].copy()] out.append(n) return (out, ) From b04e16ef5a7cd9cbf80d272a455bd34e869a6ec8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 30 Apr 2023 18:19:03 -0400 Subject: [PATCH 52/56] Make default workflow use an existing checkpoint if no SD1.5 checkpoint. 
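
The fallback only runs when no graph data was supplied and the default graph
is loaded, so user workflows keep their saved values even when those are
missing locally. The core idea, re-expressed as a minimal Python sketch of the
JavaScript in web/scripts/app.js:

    def reset_invalid_combo(value, options):
        # the default graph names an SD1.5 checkpoint; if the user doesn't
        # have it, fall back to the first checkpoint that actually exists
        if options and value not in options:
            return options[0]
        return value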
--- web/scripts/app.js | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index a161bf40e..ada1708dc 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -971,8 +971,10 @@ export class ComfyApp { loadGraphData(graphData) { this.clean(); + let reset_invalid_values = false; if (!graphData) { graphData = structuredClone(defaultGraph); + reset_invalid_values = true; } const missingNodeTypes = []; @@ -1058,6 +1060,13 @@ export class ComfyApp { } } } + if (reset_invalid_values) { + if (widget.type == "combo") { + if (!widget.options.values.includes(widget.value) && widget.options.values.length > 0) { + widget.value = widget.options.values[0]; + } + } + } } } From 6aae1f497f680355b0e51242c4195cf75803056d Mon Sep 17 00:00:00 2001 From: EllangoK Date: Mon, 1 May 2023 13:16:19 -0400 Subject: [PATCH 53/56] style context menu fix graphdialog background, and palette template --- web/extensions/core/colorPalette.js | 17 +++++++++++++++ web/style.css | 34 ++++++++++++++++++++++++----- 2 files changed, 45 insertions(+), 6 deletions(-) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index 41541a8d8..2f2238a2b 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -232,10 +232,27 @@ app.registerExtension({ "name": "My Color Palette", "colors": { "node_slot": { + }, + "litegraph_base": { + }, + "comfy_base": { } } }; + // Copy over missing keys from default color palette + const defaultColorPalette = colorPalettes[defaultColorPaletteId]; + for (const key in defaultColorPalette.colors.litegraph_base) { + if (!colorPalette.colors.litegraph_base[key]) { + colorPalette.colors.litegraph_base[key] = ""; + } + } + for (const key in defaultColorPalette.colors.comfy_base) { + if (!colorPalette.colors.comfy_base[key]) { + colorPalette.colors.comfy_base[key] = ""; + } + } + return completeColorPalette(colorPalette); }; diff --git a/web/style.css b/web/style.css index eced33d29..6ef3a4c21 100644 --- a/web/style.css +++ b/web/style.css @@ -257,8 +257,11 @@ button.comfy-queue-btn { } } +/* Input popup */ + .graphdialog { min-height: 1em; + background-color: var(--comfy-menu-bg); } .graphdialog .name { @@ -282,18 +285,37 @@ button.comfy-queue-btn { border-radius: 12px 0 0 12px; } +/* Context menu */ + .litegraph .litemenu-entry.has_submenu { position: relative; padding-right: 20px; - } +} - .litemenu-entry.has_submenu::after { +.litemenu-entry.has_submenu::after { content: ">"; position: absolute; top: 0; right: 2px; - } - - .litecontextmenu { +} + +.litecontextmenu { z-index: 9999 !important; -} \ No newline at end of file +} + +.litegraph.litecontextmenu { + background-color: var(--comfy-menu-bg) !important; + filter: brightness(95%); + color: var(--input-text) !important; +} + +.litegraph.litecontextmenu .litemenu-entry:hover:not(.disabled):not(.separator) { + background-color: var(--comfy-menu-bg) !important; + filter: brightness(155%); + color: var(--input-text) !important; +} + +.litegraph.litecontextmenu .litemenu-entry.submenu { + background-color: var(--comfy-menu-bg) !important; + color: var(--input-text) !important; +} From d3293c833947928456cd69a67c5e7d602216f997 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 1 May 2023 15:47:10 -0400 Subject: [PATCH 54/56] Properly disable all progress bars when disable_pbar=True --- comfy/extra_samplers/uni_pc.py | 8 ++++---- comfy/ldm/models/diffusion/ddim.py | 8 +++++--- comfy/sample.py | 4 ++-- comfy/samplers.py | 3 ++- 4 files 
changed, 13 insertions(+), 10 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 2952be62d..78bab5936 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -712,7 +712,7 @@ class UniPC: def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='time_uniform', method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', - atol=0.0078, rtol=0.05, corrector=False, callback=None + atol=0.0078, rtol=0.05, corrector=False, callback=None, disable_pbar=False ): t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end t_T = self.noise_schedule.T if t_start is None else t_start @@ -723,7 +723,7 @@ class UniPC: # timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) assert timesteps.shape[0] - 1 == steps # with torch.no_grad(): - for step_index in trange(steps): + for step_index in trange(steps, disable=disable_pbar): if self.noise_mask is not None: x = x * self.noise_mask + (1. - self.noise_mask) * (self.masked_image * self.noise_schedule.marginal_alpha(timesteps[step_index]) + self.noise * self.noise_schedule.marginal_std(timesteps[step_index])) if step_index == 0: @@ -835,7 +835,7 @@ def expand_dims(v, dims): -def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=None, noise_mask=None, variant='bh1'): +def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): to_zero = False if sigmas[-1] == 0: timesteps = torch.nn.functional.interpolate(sigmas[None,None,:-1], size=(len(sigmas),), mode='linear')[0][0] @@ -879,7 +879,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex order = min(3, len(timesteps) - 1) uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant) - x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback) + x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback, disable_pbar=disable) if not to_zero: x /= ns.marginal_alpha(timesteps[-1]) return x diff --git a/comfy/ldm/models/diffusion/ddim.py b/comfy/ldm/models/diffusion/ddim.py index e00ffd3f5..deab76f21 100644 --- a/comfy/ldm/models/diffusion/ddim.py +++ b/comfy/ldm/models/diffusion/ddim.py @@ -81,6 +81,7 @@ class DDIMSampler(object): extra_args=None, to_zero=True, end_step=None, + disable_pbar=False, **kwargs ): self.make_schedule_timesteps(ddim_timesteps=ddim_timesteps, ddim_eta=eta, verbose=verbose) @@ -103,7 +104,8 @@ class DDIMSampler(object): denoise_function=denoise_function, extra_args=extra_args, to_zero=to_zero, - end_step=end_step + end_step=end_step, + disable_pbar=disable_pbar ) return samples, intermediates @@ -185,7 +187,7 @@ class DDIMSampler(object): mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, - ucg_schedule=None, denoise_function=None, extra_args=None, to_zero=True, end_step=None): + ucg_schedule=None, denoise_function=None, extra_args=None, to_zero=True, end_step=None, disable_pbar=False): device = 
self.model.betas.device b = shape[0] if x_T is None: @@ -204,7 +206,7 @@ class DDIMSampler(object): total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] # print(f"Running DDIM Sampling with {total_steps} timesteps") - iterator = tqdm(time_range[:end_step], desc='DDIM Sampler', total=end_step) + iterator = tqdm(time_range[:end_step], desc='DDIM Sampler', total=end_step, disable=disable_pbar) for i, step in enumerate(iterator): index = total_steps - i - 1 diff --git a/comfy/sample.py b/comfy/sample.py index f4132bbed..bd38585ac 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -56,7 +56,7 @@ def cleanup_additional_models(models): for m in models: m.cleanup() -def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None): +def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False): device = comfy.model_management.get_torch_device() if noise_mask is not None: @@ -76,7 +76,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) - samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback) + samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar) samples = samples.cpu() cleanup_additional_models(models) diff --git a/comfy/samplers.py b/comfy/samplers.py index 1b486f803..b30fc3d9b 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -643,7 +643,8 @@ class KSampler: extra_args=extra_args, mask=noise_mask, to_zero=sigmas[-1]==0, - end_step=sigmas.shape[0] - 1) + end_step=sigmas.shape[0] - 1, + disable_pbar=disable_pbar) else: extra_args["denoise_mask"] = denoise_mask From 81bee39ca0540aa7bbab275bb6bb9f156e72addd Mon Sep 17 00:00:00 2001 From: EllangoK Date: Mon, 1 May 2023 15:57:10 -0400 Subject: [PATCH 55/56] style everything styles searchbox, should be actually everything --- web/style.css | 43 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/web/style.css b/web/style.css index 6ef3a4c21..df220cc02 100644 --- a/web/style.css +++ b/web/style.css @@ -299,23 +299,52 @@ button.comfy-queue-btn { right: 2px; } -.litecontextmenu { +.litegraph.litecontextmenu, +.litegraph.litecontextmenu.dark { z-index: 9999 !important; -} - -.litegraph.litecontextmenu { background-color: var(--comfy-menu-bg) !important; filter: brightness(95%); - color: var(--input-text) !important; } .litegraph.litecontextmenu .litemenu-entry:hover:not(.disabled):not(.separator) { background-color: var(--comfy-menu-bg) !important; filter: brightness(155%); + color: var(--input-text); +} + +.litegraph.litecontextmenu .litemenu-entry.submenu, +.litegraph.litecontextmenu.dark .litemenu-entry.submenu { + 
background-color: var(--comfy-menu-bg) !important; + color: var(--input-text); +} + +.litegraph.litecontextmenu input { + background-color: var(--comfy-input-bg) !important; color: var(--input-text) !important; } -.litegraph.litecontextmenu .litemenu-entry.submenu { +/* Search box */ + +.litegraph.litesearchbox { + z-index: 9999 !important; background-color: var(--comfy-menu-bg) !important; - color: var(--input-text) !important; + overflow: hidden; +} + +.litegraph.litesearchbox input, +.litegraph.litesearchbox select { + background-color: var(--comfy-input-bg) !important; + color: var(--input-text); +} + +.litegraph.lite-search-item { + color: var(--input-text); + background-color: var(--comfy-input-bg); + filter: brightness(80%); + padding-left: 0.2em; +} + +.litegraph.lite-search-item.generic_type { + color: var(--input-text); + filter: brightness(50%); } From 9c335a553fd9f8d4c3c97eeaec5dca89a2a900f0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 1 May 2023 18:11:58 -0400 Subject: [PATCH 56/56] LoKR support. --- comfy/sd.py | 77 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index 92dbb931d..3eb50cc95 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -111,6 +111,8 @@ def load_lora(path, to_load): loaded_keys.add(A_name) loaded_keys.add(B_name) + + ######## loha hada_w1_a_name = "{}.hada_w1_a".format(x) hada_w1_b_name = "{}.hada_w1_b".format(x) hada_w2_a_name = "{}.hada_w2_a".format(x) @@ -132,6 +134,54 @@ def load_lora(path, to_load): loaded_keys.add(hada_w2_a_name) loaded_keys.add(hada_w2_b_name) + + ######## lokr + lokr_w1_name = "{}.lokr_w1".format(x) + lokr_w2_name = "{}.lokr_w2".format(x) + lokr_w1_a_name = "{}.lokr_w1_a".format(x) + lokr_w1_b_name = "{}.lokr_w1_b".format(x) + lokr_t2_name = "{}.lokr_t2".format(x) + lokr_w2_a_name = "{}.lokr_w2_a".format(x) + lokr_w2_b_name = "{}.lokr_w2_b".format(x) + + lokr_w1 = None + if lokr_w1_name in lora.keys(): + lokr_w1 = lora[lokr_w1_name] + loaded_keys.add(lokr_w1_name) + + lokr_w2 = None + if lokr_w2_name in lora.keys(): + lokr_w2 = lora[lokr_w2_name] + loaded_keys.add(lokr_w2_name) + + lokr_w1_a = None + if lokr_w1_a_name in lora.keys(): + lokr_w1_a = lora[lokr_w1_a_name] + loaded_keys.add(lokr_w1_a_name) + + lokr_w1_b = None + if lokr_w1_b_name in lora.keys(): + lokr_w1_b = lora[lokr_w1_b_name] + loaded_keys.add(lokr_w1_b_name) + + lokr_w2_a = None + if lokr_w2_a_name in lora.keys(): + lokr_w2_a = lora[lokr_w2_a_name] + loaded_keys.add(lokr_w2_a_name) + + lokr_w2_b = None + if lokr_w2_b_name in lora.keys(): + lokr_w2_b = lora[lokr_w2_b_name] + loaded_keys.add(lokr_w2_b_name) + + lokr_t2 = None + if lokr_t2_name in lora.keys(): + lokr_t2 = lora[lokr_t2_name] + loaded_keys.add(lokr_t2_name) + + if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None): + patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2) + for x in lora.keys(): if x not in loaded_keys: print("lora key not loaded", x) @@ -315,6 +365,33 @@ class ModelPatcher: final_shape = [mat2.shape[1], mat2.shape[0], v[3].shape[2], v[3].shape[3]] mat2 = torch.mm(mat2.transpose(0, 1).flatten(start_dim=1).float(), v[3].transpose(0, 1).flatten(start_dim=1).float()).reshape(final_shape).transpose(0, 1) weight += (alpha * torch.mm(mat1.flatten(start_dim=1).float(), mat2.flatten(start_dim=1).float())).reshape(weight.shape).type(weight.dtype).to(weight.device) + elif len(v) == 8: #lokr + w1 = v[0] + w2 = v[1] + 
w1_a = v[3] + w1_b = v[4] + w2_a = v[5] + w2_b = v[6] + t2 = v[7] + dim = None + + if w1 is None: + dim = w1_b.shape[0] + w1 = torch.mm(w1_a.float(), w1_b.float()) + + if w2 is None: + dim = w2_b.shape[0] + if t2 is None: + w2 = torch.mm(w2_a.float(), w2_b.float()) + else: + w2 = torch.einsum('i j k l, j r, i p -> p r k l', t2.float(), w2_b.float(), w2_a.float()) + + if len(w2.shape) == 4: + w1 = w1.unsqueeze(2).unsqueeze(2) + if v[2] is not None and dim is not None: + alpha *= v[2] / dim + + weight += alpha * torch.kron(w1.float(), w2.float()).reshape(weight.shape).type(weight.dtype).to(weight.device) else: #loha w1a = v[0] w1b = v[1]
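
For reference, the LoKR delta reconstructed above is a Kronecker product of
two factors, each optionally stored low-rank. A minimal numeric sketch with
made-up shapes (not taken from any real checkpoint):

    import torch

    w1 = torch.randn(4, 4)                    # small left factor, stored whole
    w2_a, w2_b = torch.randn(160, 8), torch.randn(8, 192)
    w2 = w2_a @ w2_b                          # low-rank right factor, dim = 8
    alpha = 8 / 8                             # lora alpha / dim, as in the patch
    delta = alpha * torch.kron(w1, w2)        # (4*160, 4*192) == (640, 768)
    assert delta.shape == (640, 768)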