From 8a0a85e0fa68b6e400d508b61d97621ebb9bff29 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 2 Apr 2023 19:03:34 +0100 Subject: [PATCH 01/62] Added filter input to combos --- web/extensions/core/contextMenuFilter.js | 66 ++++++++++++++++++++++ web/extensions/core/invertMenuScrolling.js | 2 +- 2 files changed, 67 insertions(+), 1 deletion(-) create mode 100644 web/extensions/core/contextMenuFilter.js diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js new file mode 100644 index 000000000..4867a30a6 --- /dev/null +++ b/web/extensions/core/contextMenuFilter.js @@ -0,0 +1,66 @@ +import { app } from "/scripts/app.js"; + +// Adds filtering to context menus + +const id = "Comfy.ContextMenuFilter"; +app.registerExtension({ + name: id, + init() { + const ctxMenu = LiteGraph.ContextMenu; + LiteGraph.ContextMenu = function (values, options) { + const ctx = ctxMenu.call(this, values, options); + + // If we are a dark menu (only used for combo boxes) then add a filter input + if (options?.className === "dark" && values?.length > 10) { + const filter = document.createElement("input"); + Object.assign(filter.style, { + width: "calc(100% - 10px)", + border: "0", + boxSizing: "border-box", + background: "#333", + border: "1px solid #999", + margin: "0 0 5px 5px", + color: "#fff", + }); + filter.placeholder = "Filter list"; + this.root.prepend(filter); + + filter.addEventListener("input", () => { + // Hide all items that dont match our filter + const term = filter.value.toLocaleLowerCase(); + const items = this.root.querySelectorAll(".litemenu-entry"); + for (const item of items) { + item.style.display = !term || item.textContent.toLocaleLowerCase().includes(term) ? 
"block" : "none"; + } + + // If we have an event then we can try and position the list under the source + if (options.event) { + let top = options.event.clientY - 10; + + const bodyRect = document.body.getBoundingClientRect(); + const rootRect = this.root.getBoundingClientRect(); + if (bodyRect.height && top > bodyRect.height - rootRect.height - 10) { + top = Math.max(0, bodyRect.height - rootRect.height - 10); + } + + this.root.style.top = top + "px"; + } + }); + + requestAnimationFrame(() => { + // Focus the filter box when opening + filter.focus(); + + // If the top is off screen then shift the element + if (parseInt(this.root.style.top) < 0) { + this.root.style.top = 0; + } + }); + } + + return ctx; + }; + + LiteGraph.ContextMenu.prototype = ctxMenu.prototype; + }, +}); diff --git a/web/extensions/core/invertMenuScrolling.js b/web/extensions/core/invertMenuScrolling.js index 34523d55c..f900fccf4 100644 --- a/web/extensions/core/invertMenuScrolling.js +++ b/web/extensions/core/invertMenuScrolling.js @@ -3,10 +3,10 @@ import { app } from "/scripts/app.js"; // Inverts the scrolling of context menus const id = "Comfy.InvertMenuScrolling"; -const ctxMenu = LiteGraph.ContextMenu; app.registerExtension({ name: id, init() { + const ctxMenu = LiteGraph.ContextMenu; const replace = () => { LiteGraph.ContextMenu = function (values, options) { options = options || {}; From 74893be1ce6b8350d1eafea823450e9a002380e8 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 2 Apr 2023 21:01:39 +0100 Subject: [PATCH 02/62] Added keyboard navigation + selection --- web/extensions/core/contextMenuFilter.js | 64 +++++++++++++++++++++++- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index 4867a30a6..ced5a0a34 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -25,13 +25,73 @@ app.registerExtension({ filter.placeholder = "Filter list"; this.root.prepend(filter); + let selectedIndex = 0; + let items = this.root.querySelectorAll(".litemenu-entry"); + let itemCount = items.length; + let selectedItem; + + // Apply highlighting to the selected item + function updateSelected() { + if (selectedItem) { + selectedItem.style.setProperty("background-color", ""); + selectedItem.style.setProperty("color", ""); + } + selectedItem = items[selectedIndex]; + if (selectedItem) { + selectedItem.style.setProperty("background-color", "#ccc", "important"); + selectedItem.style.setProperty("color", "#000", "important"); + } + } + + updateSelected(); + + // Arrow up/down to select items + filter.addEventListener("keydown", (e) => { + if (e.key === "ArrowUp") { + if (selectedIndex === 0) { + selectedIndex = itemCount - 1; + } else { + selectedIndex--; + } + updateSelected(); + e.preventDefault(); + } else if (e.key === "ArrowDown") { + if (selectedIndex === itemCount - 1) { + selectedIndex = 0; + } else { + selectedIndex++; + } + updateSelected(); + e.preventDefault(); + } else if ((selectedItem && e.key === "Enter") || e.keyCode === 13 || e.keyCode === 10) { + selectedItem.click(); + } + }); + filter.addEventListener("input", () => { // Hide all items that dont match our filter const term = filter.value.toLocaleLowerCase(); - const items = this.root.querySelectorAll(".litemenu-entry"); + items = this.root.querySelectorAll(".litemenu-entry"); + // When filtering recompute which items are visible for arrow up/down + // Try and 
maintain selection + let visibleItems = []; for (const item of items) { - item.style.display = !term || item.textContent.toLocaleLowerCase().includes(term) ? "block" : "none"; + const visible = !term || item.textContent.toLocaleLowerCase().includes(term); + if (visible) { + item.style.display = "block"; + if (item === selectedItem) { + selectedIndex = visibleItems.length; + } + visibleItems.push(item); + } else { + item.style.display = "none"; + if (item === selectedItem) { + selectedIndex = 0; + } + } } + items = visibleItems; + updateSelected(); // If we have an event then we can try and position the list under the source if (options.event) { From 32fd39b4245a50757fd8257ea199f74ade348b9a Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 2 Apr 2023 21:02:40 +0100 Subject: [PATCH 03/62] Update comment --- web/extensions/core/contextMenuFilter.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index ced5a0a34..8aac84d5b 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -1,6 +1,6 @@ import { app } from "/scripts/app.js"; -// Adds filtering to context menus +// Adds filtering to combo context menus const id = "Comfy.ContextMenuFilter"; app.registerExtension({ From 1a322ca67a29cedd8b33da85fbae0c27a99cd24b Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 2 Apr 2023 21:37:24 +0100 Subject: [PATCH 04/62] Fix scaled position --- web/extensions/core/contextMenuFilter.js | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index 8aac84d5b..fa4cb2422 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -111,9 +111,13 @@ app.registerExtension({ // Focus the filter box when opening filter.focus(); - // If the top is off screen then shift the element - if (parseInt(this.root.style.top) < 0) { - this.root.style.top = 0; + const rect = this.root.getBoundingClientRect(); + + // If the top is off screen then shift the element with scaling applied + if (rect.top < 0) { + const scale = 1 - this.root.getBoundingClientRect().height / this.root.clientHeight; + const shift = (this.root.clientHeight * scale) / 2; + this.root.style.top = -shift + "px"; } }); } From 4c7a9dbcb66d3a53764d4725f92f7c116bcb4821 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Sun, 2 Apr 2023 18:44:27 -0400 Subject: [PATCH 05/62] adds Blend, Blur, Dither, Sharpen nodes --- comfy_extras/nodes_post_processing.py | 215 ++++++++++++++++++++++++++ nodes.py | 3 +- 2 files changed, 217 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_post_processing.py diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py new file mode 100644 index 000000000..3f3bddd75 --- /dev/null +++ b/comfy_extras/nodes_post_processing.py @@ -0,0 +1,215 @@ +import torch +import torch.nn.functional as F + + +class Blend: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "blend_factor": ("FLOAT", { + "default": 0.5, + "min": 0.0, + "max": 1.0, + "step": 0.01 + }), + "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "blend_images" + + 
CATEGORY = "postprocessing" + + def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): + blended_image = self.blend_mode(image1, image2, blend_mode) + blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor + blended_image = torch.clamp(blended_image, 0, 1) + return (blended_image,) + + def blend_mode(self, img1, img2, mode): + if mode == "normal": + return img2 + elif mode == "multiply": + return img1 * img2 + elif mode == "screen": + return 1 - (1 - img1) * (1 - img2) + elif mode == "overlay": + return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2)) + elif mode == "soft_light": + return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1)) + else: + raise ValueError(f"Unsupported blend mode: {mode}") + + def g(self, x): + return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x)) + +class Blur: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "blur_radius": ("INT", { + "default": 1, + "min": 1, + "max": 31, + "step": 1 + }), + "sigma": ("FLOAT", { + "default": 1.0, + "min": 0.1, + "max": 10.0, + "step": 0.1 + }), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "blur" + + CATEGORY = "postprocessing" + + def gaussian_kernel(self, kernel_size: int, sigma: float): + x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size), torch.linspace(-1, 1, kernel_size), indexing="ij") + d = torch.sqrt(x * x + y * y) + g = torch.exp(-(d * d) / (2.0 * sigma * sigma)) + return g / g.sum() + + def blur(self, image: torch.Tensor, blur_radius: int, sigma: float): + if blur_radius == 0: + return (image,) + + batch_size, height, width, channels = image.shape + + kernel_size = blur_radius * 2 + 1 + kernel = self.gaussian_kernel(kernel_size, sigma).repeat(channels, 1, 1).unsqueeze(1) + + image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C) + blurred = F.conv2d(image, kernel, padding=kernel_size // 2, groups=channels) + blurred = blurred.permute(0, 2, 3, 1) + + return (blurred,) + +class Dither: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "bits": ("INT", { + "default": 4, + "min": 1, + "max": 8, + "step": 1 + }), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "dither" + + CATEGORY = "postprocessing" + + def dither(self, image: torch.Tensor, bits: int): + batch_size, height, width, _ = image.shape + result = torch.zeros_like(image) + + for b in range(batch_size): + tensor_image = image[b] + img = (tensor_image * 255) + height, width, _ = img.shape + + scale = 255 / (2**bits - 1) + + for y in range(height): + for x in range(width): + old_pixel = img[y, x].clone() + new_pixel = torch.round(old_pixel / scale) * scale + img[y, x] = new_pixel + + quant_error = old_pixel - new_pixel + + if x + 1 < width: + img[y, x + 1] += quant_error * 7 / 16 + if y + 1 < height: + if x - 1 >= 0: + img[y + 1, x - 1] += quant_error * 3 / 16 + img[y + 1, x] += quant_error * 5 / 16 + if x + 1 < width: + img[y + 1, x + 1] += quant_error * 1 / 16 + + dithered = img / 255 + tensor = dithered.unsqueeze(0) + result[b] = tensor + + return (result,) + +class Sharpen: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "sharpen_radius": ("INT", { + "default": 1, + "min": 1, + "max": 31, + "step": 1 + }), + "alpha": ("FLOAT", { + 
"default": 1.0, + "min": 0.1, + "max": 5.0, + "step": 0.1 + }), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "sharpen" + + CATEGORY = "postprocessing" + + def sharpen(self, image: torch.Tensor, sharpen_radius: int, alpha: float): + if sharpen_radius == 0: + return (image,) + + batch_size, height, width, channels = image.shape + + kernel_size = sharpen_radius * 2 + 1 + kernel = torch.ones((kernel_size, kernel_size), dtype=torch.float32) * -1 + center = kernel_size // 2 + kernel[center, center] = kernel_size**2 + kernel *= alpha + kernel = kernel.repeat(channels, 1, 1).unsqueeze(1) + + tensor_image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C) + sharpened = F.conv2d(tensor_image, kernel, padding=center, groups=channels) + sharpened = sharpened.permute(0, 2, 3, 1) + + result = torch.clamp(sharpened, 0, 1) + + return (result,) + +NODE_CLASS_MAPPINGS = { + "Blend": Blend, + "Blur": Blur, + "Dither": Dither, + "Sharpen": Sharpen, +} diff --git a/nodes.py b/nodes.py index 963ff32a0..a93f04108 100644 --- a/nodes.py +++ b/nodes.py @@ -1112,4 +1112,5 @@ def load_custom_nodes(): def init_custom_nodes(): load_custom_nodes() - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py")) \ No newline at end of file + load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py")) + load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py")) From 028e1f7ad2a50efea8391ea54b606cf865d788db Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 3 Apr 2023 08:11:44 +0100 Subject: [PATCH 06/62] Fix scaled position when filtering Add esc to close --- web/extensions/core/contextMenuFilter.js | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index fa4cb2422..51e66f924 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -43,6 +43,17 @@ app.registerExtension({ } } + const positionList = () => { + const rect = this.root.getBoundingClientRect(); + + // If the top is off screen then shift the element with scaling applied + if (rect.top < 0) { + const scale = 1 - this.root.getBoundingClientRect().height / this.root.clientHeight; + const shift = (this.root.clientHeight * scale) / 2; + this.root.style.top = -shift + "px"; + } + } + updateSelected(); // Arrow up/down to select items @@ -65,6 +76,8 @@ app.registerExtension({ e.preventDefault(); } else if ((selectedItem && e.key === "Enter") || e.keyCode === 13 || e.keyCode === 10) { selectedItem.click(); + } else if(e.key === "Escape") { + this.close(); } }); @@ -104,6 +117,7 @@ app.registerExtension({ } this.root.style.top = top + "px"; + positionList(); } }); @@ -111,14 +125,7 @@ app.registerExtension({ // Focus the filter box when opening filter.focus(); - const rect = this.root.getBoundingClientRect(); - - // If the top is off screen then shift the element with scaling applied - if (rect.top < 0) { - const scale = 1 - this.root.getBoundingClientRect().height / this.root.clientHeight; - const shift = (this.root.clientHeight * scale) / 2; - this.root.style.top = -shift + "px"; - } + positionList(); }); } From fa2febc0624678362cc758d316bb59afce9c8f06 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Mon, 3 
Apr 2023 09:52:04 -0400 Subject: [PATCH 07/62] blend supports any size, dither -> quantize --- comfy_extras/nodes_post_processing.py | 74 ++++++++++++++++----------- 1 file changed, 44 insertions(+), 30 deletions(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 3f3bddd75..322f3ca89 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -1,5 +1,7 @@ +import numpy as np import torch import torch.nn.functional as F +from PIL import Image class Blend: @@ -28,6 +30,9 @@ class Blend: CATEGORY = "postprocessing" def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): + if image1.shape != image2.shape: + image2 = self.crop_and_resize(image2, image1.shape) + blended_image = self.blend_mode(image1, image2, blend_mode) blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor blended_image = torch.clamp(blended_image, 0, 1) @@ -50,6 +55,29 @@ class Blend: def g(self, x): return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x)) + def crop_and_resize(self, img: torch.Tensor, target_shape: tuple): + batch_size, img_h, img_w, img_c = img.shape + _, target_h, target_w, _ = target_shape + img_aspect_ratio = img_w / img_h + target_aspect_ratio = target_w / target_h + + # Crop center of the image to the target aspect ratio + if img_aspect_ratio > target_aspect_ratio: + new_width = int(img_h * target_aspect_ratio) + left = (img_w - new_width) // 2 + img = img[:, :, left:left + new_width, :] + else: + new_height = int(img_w / target_aspect_ratio) + top = (img_h - new_height) // 2 + img = img[:, top:top + new_height, :, :] + + # Resize to target size + img = img.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C) + img = F.interpolate(img, size=(target_h, target_w), mode='bilinear', align_corners=False) + img = img.permute(0, 2, 3, 1) + + return img + class Blur: def __init__(self): pass @@ -100,7 +128,7 @@ class Blur: return (blurred,) -class Dither: +class Quantize: def __init__(self): pass @@ -109,51 +137,37 @@ class Dither: return { "required": { "image": ("IMAGE",), - "bits": ("INT", { - "default": 4, + "colors": ("INT", { + "default": 256, "min": 1, - "max": 8, + "max": 256, "step": 1 }), + "dither": (["none", "floyd-steinberg"],), }, } RETURN_TYPES = ("IMAGE",) - FUNCTION = "dither" + FUNCTION = "quantize" CATEGORY = "postprocessing" - def dither(self, image: torch.Tensor, bits: int): + def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"): batch_size, height, width, _ = image.shape result = torch.zeros_like(image) + dither_option = Image.Dither.FLOYDSTEINBERG if dither == "floyd-steinberg" else Image.Dither.NONE + for b in range(batch_size): tensor_image = image[b] - img = (tensor_image * 255) - height, width, _ = img.shape + img = (tensor_image * 255).to(torch.uint8).numpy() + pil_image = Image.fromarray(img, mode='RGB') - scale = 255 / (2**bits - 1) + palette = pil_image.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836 + quantized_image = pil_image.quantize(colors=colors, palette=palette, dither=dither_option) - for y in range(height): - for x in range(width): - old_pixel = img[y, x].clone() - new_pixel = torch.round(old_pixel / scale) * scale - img[y, x] = new_pixel - - quant_error = old_pixel - new_pixel - - if x + 1 < width: - img[y, x + 1] += quant_error * 7 / 16 - if y + 1 < height: - if x - 1 >= 0: - img[y + 1, x - 
1] += quant_error * 3 / 16
-                    img[y + 1, x] += quant_error * 5 / 16
-                    if x + 1 < width:
-                        img[y + 1, x + 1] += quant_error * 1 / 16
-
-            dithered = img / 255
-            tensor = dithered.unsqueeze(0)
-            result[b] = tensor
+            quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
+            result[b] = quantized_array
 
         return (result,)
 
@@ -210,6 +224,6 @@ class Sharpen:
 NODE_CLASS_MAPPINGS = {
     "Blend": Blend,
     "Blur": Blur,
-    "Dither": Dither,
+    "Quantize": Quantize,
     "Sharpen": Sharpen,
 }

From f50b1fec695cecc8f7c87ce1f39db3f6b49bb3a1 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 3 Apr 2023 13:50:29 -0400
Subject: [PATCH 08/62] Add noise augmentation setting to unCLIPConditioning.

---
 comfy/samplers.py | 16 +++++++++++++---
 nodes.py | 5 +++--
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index ddec99007..59dbab53d 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -348,17 +348,27 @@ def encode_adm(noise_augmentor, conds, batch_size, device):
             if 'adm' in x[1]:
                 adm_inputs = []
                 weights = []
+                noise_aug = []
                 adm_in = x[1]["adm"]
                 for adm_c in adm_in:
                     adm_cond = adm_c[0].image_embeds
                     weight = adm_c[1]
-                    c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([0], device=device))
+                    noise_augment = adm_c[2]
+                    noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
+                    c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device))
                     adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight
                     weights.append(weight)
+                    noise_aug.append(noise_augment)
                     adm_inputs.append(adm_out)
 
-                adm_out = torch.stack(adm_inputs).sum(0)
-                #TODO: Apply Noise to Embedding Mix
+                if len(noise_aug) > 1:
+                    adm_out = torch.stack(adm_inputs).sum(0)
+                    #TODO: add a way to control this
+                    noise_augment = 0.05
+                    noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
+                    print(noise_level)
+                    c_adm, noise_level_emb = noise_augmentor(adm_out[:, :noise_augmentor.time_embed.dim], noise_level=torch.tensor([noise_level], device=device))
+                    adm_out = torch.cat((c_adm, noise_level_emb), 1)
             else:
                 adm_out = torch.zeros((1, noise_augmentor.time_embed.dim * 2), device=device)
             x[1] = x[1].copy()
diff --git a/nodes.py b/nodes.py
index 963ff32a0..ffbba9f94 100644
--- a/nodes.py
+++ b/nodes.py
@@ -445,17 +445,18 @@ class unCLIPConditioning:
         return {"required": {"conditioning": ("CONDITIONING", ),
                              "clip_vision_output": ("CLIP_VISION_OUTPUT", ),
                              "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+                             "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                              }}
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "apply_adm"
 
     CATEGORY = "_for_testing/unclip"
 
-    def apply_adm(self, conditioning, clip_vision_output, strength):
+    def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation):
         c = []
         for t in conditioning:
             o = t[1].copy()
-            x = (clip_vision_output, strength)
+            x = (clip_vision_output, strength, noise_augmentation)
             if "adm" in o:
                 o["adm"] = o["adm"][:] + [x]
             else:

From 4e437582365cd1b9f8261be9405180feef382357 Mon Sep 17 00:00:00 2001
From: omar92
Date: Mon, 3 Apr 2023 21:27:43 +0200
Subject: [PATCH 09/62] Fix bug in reroute node that didn't allow loading old
 workflows

---
 web/extensions/core/rerouteNode.js | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/web/extensions/core/rerouteNode.js b/web/extensions/core/rerouteNode.js
index 1342cae92..aee5d147c 100644
--- a/web/extensions/core/rerouteNode.js +++ b/web/extensions/core/rerouteNode.js @@ -50,12 +50,12 @@ app.registerExtension({ } else { // Move the previous node - currentNode = node; + currentNode = node; } } else { // We've found the end inputNode = currentNode; - inputType = node.outputs[link.origin_slot].type; + inputType = node.outputs[link.origin_slot]?.type ?? null; break; } } else { @@ -87,7 +87,7 @@ app.registerExtension({ updateNodes.push(node); } else { // We've found an output - const nodeOutType = node.inputs[link.target_slot].type; + const nodeOutType = node.inputs && node.inputs[link?.target_slot] && node.inputs[link.target_slot].type ? node.inputs[link.target_slot].type : null; if (inputType && nodeOutType !== inputType) { // The output doesnt match our input so disconnect it node.disconnectInput(link.target_slot); From 539ff487a81f4ed4f51ca9ece57756b573e52190 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 3 Apr 2023 15:49:28 -0400 Subject: [PATCH 10/62] Pull latest tomesd code from upstream. --- comfy/ldm/modules/tomesd.py | 69 ++++++++++++++++++++++++++----------- 1 file changed, 48 insertions(+), 21 deletions(-) diff --git a/comfy/ldm/modules/tomesd.py b/comfy/ldm/modules/tomesd.py index 1eafcd0aa..6a13b80c9 100644 --- a/comfy/ldm/modules/tomesd.py +++ b/comfy/ldm/modules/tomesd.py @@ -1,4 +1,4 @@ - +#Taken from: https://github.com/dbolya/tomesd import torch from typing import Tuple, Callable @@ -8,13 +8,23 @@ def do_nothing(x: torch.Tensor, mode:str=None): return x +def mps_gather_workaround(input, dim, index): + if input.shape[-1] == 1: + return torch.gather( + input.unsqueeze(-1), + dim - 1 if dim < 0 else dim, + index.unsqueeze(-1) + ).squeeze(-1) + else: + return torch.gather(input, dim, index) + + def bipartite_soft_matching_random2d(metric: torch.Tensor, w: int, h: int, sx: int, sy: int, r: int, no_rand: bool = False) -> Tuple[Callable, Callable]: """ Partitions the tokens into src and dst and merges r tokens from src to dst. Dst tokens are partitioned by choosing one randomy in each (sx, sy) region. 
- Args: - metric [B, N, C]: metric to use for similarity - w: image width in tokens @@ -28,33 +38,49 @@ def bipartite_soft_matching_random2d(metric: torch.Tensor, if r <= 0: return do_nothing, do_nothing + + gather = mps_gather_workaround if metric.device.type == "mps" else torch.gather with torch.no_grad(): hsy, wsx = h // sy, w // sx # For each sy by sx kernel, randomly assign one token to be dst and the rest src - idx_buffer = torch.zeros(1, hsy, wsx, sy*sx, 1, device=metric.device) - if no_rand: - rand_idx = torch.zeros(1, hsy, wsx, 1, 1, device=metric.device, dtype=torch.int64) + rand_idx = torch.zeros(hsy, wsx, 1, device=metric.device, dtype=torch.int64) else: - rand_idx = torch.randint(sy*sx, size=(1, hsy, wsx, 1, 1), device=metric.device) + rand_idx = torch.randint(sy*sx, size=(hsy, wsx, 1), device=metric.device) - idx_buffer.scatter_(dim=3, index=rand_idx, src=-torch.ones_like(rand_idx, dtype=idx_buffer.dtype)) - idx_buffer = idx_buffer.view(1, hsy, wsx, sy, sx, 1).transpose(2, 3).reshape(1, N, 1) - rand_idx = idx_buffer.argsort(dim=1) + # The image might not divide sx and sy, so we need to work on a view of the top left if the idx buffer instead + idx_buffer_view = torch.zeros(hsy, wsx, sy*sx, device=metric.device, dtype=torch.int64) + idx_buffer_view.scatter_(dim=2, index=rand_idx, src=-torch.ones_like(rand_idx, dtype=rand_idx.dtype)) + idx_buffer_view = idx_buffer_view.view(hsy, wsx, sy, sx).transpose(1, 2).reshape(hsy * sy, wsx * sx) - num_dst = int((1 / (sx*sy)) * N) + # Image is not divisible by sx or sy so we need to move it into a new buffer + if (hsy * sy) < h or (wsx * sx) < w: + idx_buffer = torch.zeros(h, w, device=metric.device, dtype=torch.int64) + idx_buffer[:(hsy * sy), :(wsx * sx)] = idx_buffer_view + else: + idx_buffer = idx_buffer_view + + # We set dst tokens to be -1 and src to be 0, so an argsort gives us dst|src indices + rand_idx = idx_buffer.reshape(1, -1, 1).argsort(dim=1) + + # We're finished with these + del idx_buffer, idx_buffer_view + + # rand_idx is currently dst|src, so split them + num_dst = hsy * wsx a_idx = rand_idx[:, num_dst:, :] # src b_idx = rand_idx[:, :num_dst, :] # dst def split(x): C = x.shape[-1] - src = x.gather(dim=1, index=a_idx.expand(B, N - num_dst, C)) - dst = x.gather(dim=1, index=b_idx.expand(B, num_dst, C)) + src = gather(x, dim=1, index=a_idx.expand(B, N - num_dst, C)) + dst = gather(x, dim=1, index=b_idx.expand(B, num_dst, C)) return src, dst + # Cosine similarity between A and B metric = metric / metric.norm(dim=-1, keepdim=True) a, b = split(metric) scores = a @ b.transpose(-1, -2) @@ -62,19 +88,20 @@ def bipartite_soft_matching_random2d(metric: torch.Tensor, # Can't reduce more than the # tokens in src r = min(a.shape[1], r) + # Find the most similar greedily node_max, node_idx = scores.max(dim=-1) edge_idx = node_max.argsort(dim=-1, descending=True)[..., None] unm_idx = edge_idx[..., r:, :] # Unmerged Tokens src_idx = edge_idx[..., :r, :] # Merged Tokens - dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx) + dst_idx = gather(node_idx[..., None], dim=-2, index=src_idx) def merge(x: torch.Tensor, mode="mean") -> torch.Tensor: src, dst = split(x) n, t1, c = src.shape - unm = src.gather(dim=-2, index=unm_idx.expand(n, t1 - r, c)) - src = src.gather(dim=-2, index=src_idx.expand(n, r, c)) + unm = gather(src, dim=-2, index=unm_idx.expand(n, t1 - r, c)) + src = gather(src, dim=-2, index=src_idx.expand(n, r, c)) dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode) return torch.cat([unm, dst], dim=1) @@ 
-84,13 +111,13 @@ def bipartite_soft_matching_random2d(metric: torch.Tensor,
         unm, dst = x[..., :unm_len, :], x[..., unm_len:, :]
         _, _, c = unm.shape
 
-        src = dst.gather(dim=-2, index=dst_idx.expand(B, r, c))
+        src = gather(dst, dim=-2, index=dst_idx.expand(B, r, c))
 
         # Combine back to the original shape
         out = torch.zeros(B, N, c, device=x.device, dtype=x.dtype)
         out.scatter_(dim=-2, index=b_idx.expand(B, num_dst, c), src=dst)
-        out.scatter_(dim=-2, index=a_idx.expand(B, a_idx.shape[1], 1).gather(dim=1, index=unm_idx).expand(B, unm_len, c), src=unm)
-        out.scatter_(dim=-2, index=a_idx.expand(B, a_idx.shape[1], 1).gather(dim=1, index=src_idx).expand(B, r, c), src=src)
+        out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 1), dim=1, index=unm_idx).expand(B, unm_len, c), src=unm)
+        out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 1), dim=1, index=src_idx).expand(B, r, c), src=src)
 
         return out
 
@@ -100,14 +127,14 @@ def bipartite_soft_matching_random2d(metric: torch.Tensor,
 
 def get_functions(x, ratio, original_shape):
     b, c, original_h, original_w = original_shape
     original_tokens = original_h * original_w
-    downsample = int(math.sqrt(original_tokens // x.shape[1]))
+    downsample = int(math.ceil(math.sqrt(original_tokens // x.shape[1])))
     stride_x = 2
     stride_y = 2
     max_downsample = 1
 
     if downsample <= max_downsample:
-        w = original_w // downsample
-        h = original_h // downsample
+        w = int(math.ceil(original_w / downsample))
+        h = int(math.ceil(original_h / downsample))
         r = int(x.shape[1] * ratio)
         no_rand = False
         m, u = bipartite_soft_matching_random2d(x, w, h, stride_x, stride_y, r, no_rand)

From ca2ae98470fdebd951cdb750998b82ecb532c901 Mon Sep 17 00:00:00 2001
From: omar92
Date: Mon, 3 Apr 2023 22:01:18 +0200
Subject: [PATCH 11/62] Check if workflowNode and widgets_values are defined,
 as they were causing errors on QueuePrompt after loading a workflow

---
 web/extensions/core/dynamicPrompts.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/web/extensions/core/dynamicPrompts.js b/web/extensions/core/dynamicPrompts.js
index 8528201d3..7dae07f4d 100644
--- a/web/extensions/core/dynamicPrompts.js
+++ b/web/extensions/core/dynamicPrompts.js
@@ -30,7 +30,8 @@ app.registerExtension({
 				}
 
 				// Overwrite the value in the serialized workflow pnginfo
-				workflowNode.widgets_values[widgetIndex] = prompt;
+				if (workflowNode?.widgets_values)
+					workflowNode.widgets_values[widgetIndex] = prompt;
 
 				return prompt;
 			};

From dc24d7e2fd2967de6d3bc45971bfcb0274724f8b Mon Sep 17 00:00:00 2001
From: mligaintart <>
Date: Mon, 3 Apr 2023 16:46:00 -0400
Subject: [PATCH 12/62] Adds orientation settings to reroute nodes, allowing
 for cleaner graphs.

---
 web/extensions/core/rerouteNode.js | 32 ++++++++++++++++++++++++++++--
 1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/web/extensions/core/rerouteNode.js b/web/extensions/core/rerouteNode.js
index aee5d147c..c31f63cd0 100644
--- a/web/extensions/core/rerouteNode.js
+++ b/web/extensions/core/rerouteNode.js
@@ -11,11 +11,14 @@ app.registerExtension({
 					this.properties = {};
 				}
 				this.properties.showOutputText = RerouteNode.defaultVisibility;
+				this.properties.horizontal = false;
 
 				this.addInput("", "*");
 				this.addOutput(this.properties.showOutputText ? "*" : "", "*");
 
 				this.onConnectionsChange = function (type, index, connected, link_info) {
+					this.applyOrientation();
+
 					// Prevent multiple connections to different types when we have no input
 					if (connected && type === LiteGraph.OUTPUT) {
 						// Ignore wildcard nodes as these will be updated to real types
@@ -49,8 +52,8 @@ app.registerExtension({
 							currentNode = null;
 						} else {
-						    // Move the previous node
-						    currentNode = node;
+							// Move the previous node
+							currentNode = node;
 						}
 					} else {
 						// We've found the end
@@ -112,6 +115,7 @@ app.registerExtension({
 					node.__outputType = displayType;
 					node.outputs[0].name = node.properties.showOutputText ? displayType : "";
 					node.size = node.computeSize();
+					node.applyOrientation();
 
 					for (const l of node.outputs[0].links || []) {
 						const link = app.graph.links[l];
@@ -153,6 +157,7 @@ app.registerExtension({
 							this.outputs[0].name = "";
 						}
 						this.size = this.computeSize();
+						this.applyOrientation();
 						app.graph.setDirtyCanvas(true, true);
 					},
 				},
@@ -161,9 +166,32 @@ app.registerExtension({
 					callback: () => {
 						RerouteNode.setDefaultTextVisibility(!RerouteNode.defaultVisibility);
 					},
+				},
+				{
+					// naming is inverted with respect to LiteGraphNode.horizontal
+					// LiteGraphNode.horizontal == true means that
+					// each slot in the inputs and outputs are layed out horizontally,
+					// which is the opposite of the visual orientation of the inputs and outputs as a node
+					content: "Set " + (this.properties.horizontal ? "Horizontal" : "Vertical"),
+					callback: () => {
+						this.properties.horizontal = !this.properties.horizontal;
+						this.applyOrientation();
+					},
 				}
 			);
 		}
+		applyOrientation() {
+			this.horizontal = this.properties.horizontal;
+			if (this.horizontal) {
+				// we correct the input position, because LiteGraphNode.horizontal
+				// doesn't account for title presence
+				// which reroute nodes don't have
+				this.inputs[0].pos = [this.size[0] / 2, 0];
+			} else {
+				delete this.inputs[0].pos;
+			}
+			app.graph.setDirtyCanvas(true, true);
+		}
 
 		computeSize() {
 			return [

From c02baed00fe8ea910d6def31d98308c4a92ae16a Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 3 Apr 2023 20:13:43 -0400
Subject: [PATCH 13/62] Add that unCLIP models are supported to the README.

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 84e0061ff..0f7d24c45 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
 - [Inpainting](https://comfyanonymous.github.io/ComfyUI_examples/inpaint/) with both regular and inpainting models.
 - [ControlNet and T2I-Adapter](https://comfyanonymous.github.io/ComfyUI_examples/controlnet/)
 - [Upscale Models (ESRGAN, ESRGAN variants, SwinIR, Swin2SR, etc...)](https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/)
+- [unCLIP Models](https://comfyanonymous.github.io/ComfyUI_examples/unclip/)
 - Starts up very fast.
 - Works fully offline: will never download anything.
 - [Config file](extra_model_paths.yaml.example) to set the search paths for models.

From 23524ad8c5027e5691d749b6ae778106c469f16a Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 3 Apr 2023 22:58:54 -0400
Subject: [PATCH 14/62] Remove print.
---
 comfy/samplers.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index 59dbab53d..93f5d361b 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -366,7 +366,6 @@ def encode_adm(noise_augmentor, conds, batch_size, device):
                     #TODO: add a way to control this
                     noise_augment = 0.05
                     noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
-                    print(noise_level)
                     c_adm, noise_level_emb = noise_augmentor(adm_out[:, :noise_augmentor.time_embed.dim], noise_level=torch.tensor([noise_level], device=device))
                     adm_out = torch.cat((c_adm, noise_level_emb), 1)
             else:

From 5036fecbddcd4b7108d196a9c88d91c4480f390f Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 4 Apr 2023 03:42:40 -0400
Subject: [PATCH 15/62] Update colab notebook.

---
 notebooks/comfyui_colab.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb
index a86ccc753..3e59fbde7 100644
--- a/notebooks/comfyui_colab.ipynb
+++ b/notebooks/comfyui_colab.ipynb
@@ -47,7 +47,7 @@
     "  !git pull\n",
     "\n",
     "!echo -= Install dependencies =-\n",
-    "!pip install xformers==0.0.16 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu117"
+    "!pip install xformers -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118"
    ]
   },
   {

From 56196ab0f72c8f671bd85b425744f80f02c823ea Mon Sep 17 00:00:00 2001
From: EllangoK
Date: Tue, 4 Apr 2023 10:57:34 -0400
Subject: [PATCH 16/62] Use common_upscale in blend

---
 comfy_extras/nodes_post_processing.py | 29 +++++----------------------
 1 file changed, 5 insertions(+), 24 deletions(-)

diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py
index 322f3ca89..703deaabf 100644
--- a/comfy_extras/nodes_post_processing.py
+++ b/comfy_extras/nodes_post_processing.py
@@ -3,6 +3,8 @@ import torch
 import torch.nn.functional as F
 from PIL import Image
 
+import comfy.utils
+
 
 class Blend:
     def __init__(self):
@@ -31,7 +33,9 @@ class Blend:
 
     def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
         if image1.shape != image2.shape:
-            image2 = self.crop_and_resize(image2, image1.shape)
+            image2 = image2.permute(0, 3, 1, 2)
+            image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
+            image2 = image2.permute(0, 2, 3, 1)
 
         blended_image = self.blend_mode(image1, image2, blend_mode)
         blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
@@ -55,29 +59,6 @@ class Blend:
     def g(self, x):
         return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))
 
-    def crop_and_resize(self, img: torch.Tensor, target_shape: tuple):
-        batch_size, img_h, img_w, img_c = img.shape
-        _, target_h, target_w, _ = target_shape
-        img_aspect_ratio = img_w / img_h
-        target_aspect_ratio = target_w / target_h
-
-        # Crop center of the image to the target aspect ratio
-        if img_aspect_ratio > target_aspect_ratio:
-            new_width = int(img_h * target_aspect_ratio)
-            left = (img_w - new_width) // 2
-            img = img[:, :, left:left + new_width, :]
-        else:
-            new_height = int(img_w / target_aspect_ratio)
-            top = (img_h - new_height) // 2
-            img = img[:, top:top + new_height, :, :]
-
-        # Resize to target size
-        img = img.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C)
-        img = F.interpolate(img, size=(target_h, target_w), mode='bilinear', align_corners=False)
-        img = img.permute(0, 2, 3, 1)
-
-        return img
-
 class Blur:
     def __init__(self):
         pass

From 1718730e80549c35ce3c5d3fb7926ce5654a2fdd Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 4 Apr 2023 11:49:29 -0400
Subject: [PATCH 17/62] Ignore embeddings when sizes don't match and print a
 WARNING.

---
 comfy/sd1_clip.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 93036b1ae..4f51657c3 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -74,9 +74,12 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
                 if isinstance(y, int):
                     tokens_temp += [y]
                 else:
-                    embedding_weights += [y]
-                    tokens_temp += [next_new_token]
-                    next_new_token += 1
+                    if y.shape[0] == current_embeds.weight.shape[1]:
+                        embedding_weights += [y]
+                        tokens_temp += [next_new_token]
+                        next_new_token += 1
+                    else:
+                        print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1])
             out_tokens += [tokens_temp]
 
         if len(embedding_weights) > 0:

From 080c758cda19288039de6941876dbdf6f3f9d357 Mon Sep 17 00:00:00 2001
From: City <125218114+city96@users.noreply.github.com>
Date: Tue, 4 Apr 2023 18:16:23 +0200
Subject: [PATCH 18/62] Ask for confirmation before clearing nodes

---
 web/scripts/ui.js | 1 +
 1 file changed, 1 insertion(+)

diff --git a/web/scripts/ui.js b/web/scripts/ui.js
index 679f10b20..68bfc792a 100644
--- a/web/scripts/ui.js
+++ b/web/scripts/ui.js
@@ -510,6 +510,7 @@ export class ComfyUI {
 			$el("button", { textContent: "Load", onclick: () => fileInput.click() }),
 			$el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
 			$el("button", { textContent: "Clear", onclick: () => {
+				if (!confirm("Are you sure you want to remove all nodes?")) return;
 				app.clean();
 				app.graph.clear();
 			}}),

From af291e6f69a66bce6460de58e6e9328f48640dd5 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 4 Apr 2023 13:56:13 -0400
Subject: [PATCH 19/62] Convert line endings to unix.
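
The node file was previously checked in with non-Unix (CRLF-style) line
endings, which is why every line of the file shows up as changed below even
though the code itself is identical. A minimal sketch of this kind of
normalization (assuming Python; the exact tool used is not recorded in this
patch):

    # Hypothetical helper, not part of the repository: rewrite a file in
    # place with Unix (LF) line endings only.
    path = "comfy_extras/nodes_post_processing.py"
    with open(path, "rb") as f:
        data = f.read()
    with open(path, "wb") as f:
        f.write(data.replace(b"\r\n", b"\n"))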
--- comfy_extras/nodes_post_processing.py | 420 +++++++++++++------------- 1 file changed, 210 insertions(+), 210 deletions(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 703deaabf..de9ef0838 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -1,210 +1,210 @@ -import numpy as np -import torch -import torch.nn.functional as F -from PIL import Image - -import comfy.utils - - -class Blend: - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "blend_factor": ("FLOAT", { - "default": 0.5, - "min": 0.0, - "max": 1.0, - "step": 0.01 - }), - "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "blend_images" - - CATEGORY = "postprocessing" - - def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): - if image1.shape != image2.shape: - image2 = image2.permute(0, 3, 1, 2) - image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center') - image2 = image2.permute(0, 2, 3, 1) - - blended_image = self.blend_mode(image1, image2, blend_mode) - blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor - blended_image = torch.clamp(blended_image, 0, 1) - return (blended_image,) - - def blend_mode(self, img1, img2, mode): - if mode == "normal": - return img2 - elif mode == "multiply": - return img1 * img2 - elif mode == "screen": - return 1 - (1 - img1) * (1 - img2) - elif mode == "overlay": - return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2)) - elif mode == "soft_light": - return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1)) - else: - raise ValueError(f"Unsupported blend mode: {mode}") - - def g(self, x): - return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x)) - -class Blur: - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "blur_radius": ("INT", { - "default": 1, - "min": 1, - "max": 31, - "step": 1 - }), - "sigma": ("FLOAT", { - "default": 1.0, - "min": 0.1, - "max": 10.0, - "step": 0.1 - }), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "blur" - - CATEGORY = "postprocessing" - - def gaussian_kernel(self, kernel_size: int, sigma: float): - x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size), torch.linspace(-1, 1, kernel_size), indexing="ij") - d = torch.sqrt(x * x + y * y) - g = torch.exp(-(d * d) / (2.0 * sigma * sigma)) - return g / g.sum() - - def blur(self, image: torch.Tensor, blur_radius: int, sigma: float): - if blur_radius == 0: - return (image,) - - batch_size, height, width, channels = image.shape - - kernel_size = blur_radius * 2 + 1 - kernel = self.gaussian_kernel(kernel_size, sigma).repeat(channels, 1, 1).unsqueeze(1) - - image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C) - blurred = F.conv2d(image, kernel, padding=kernel_size // 2, groups=channels) - blurred = blurred.permute(0, 2, 3, 1) - - return (blurred,) - -class Quantize: - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "colors": ("INT", { - "default": 256, - "min": 1, - "max": 256, - "step": 1 - }), - "dither": (["none", "floyd-steinberg"],), - }, - } - - 
RETURN_TYPES = ("IMAGE",) - FUNCTION = "quantize" - - CATEGORY = "postprocessing" - - def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"): - batch_size, height, width, _ = image.shape - result = torch.zeros_like(image) - - dither_option = Image.Dither.FLOYDSTEINBERG if dither == "floyd-steinberg" else Image.Dither.NONE - - for b in range(batch_size): - tensor_image = image[b] - img = (tensor_image * 255).to(torch.uint8).numpy() - pil_image = Image.fromarray(img, mode='RGB') - - palette = pil_image.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836 - quantized_image = pil_image.quantize(colors=colors, palette=palette, dither=dither_option) - - quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255 - result[b] = quantized_array - - return (result,) - -class Sharpen: - def __init__(self): - pass - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "sharpen_radius": ("INT", { - "default": 1, - "min": 1, - "max": 31, - "step": 1 - }), - "alpha": ("FLOAT", { - "default": 1.0, - "min": 0.1, - "max": 5.0, - "step": 0.1 - }), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "sharpen" - - CATEGORY = "postprocessing" - - def sharpen(self, image: torch.Tensor, sharpen_radius: int, alpha: float): - if sharpen_radius == 0: - return (image,) - - batch_size, height, width, channels = image.shape - - kernel_size = sharpen_radius * 2 + 1 - kernel = torch.ones((kernel_size, kernel_size), dtype=torch.float32) * -1 - center = kernel_size // 2 - kernel[center, center] = kernel_size**2 - kernel *= alpha - kernel = kernel.repeat(channels, 1, 1).unsqueeze(1) - - tensor_image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C) - sharpened = F.conv2d(tensor_image, kernel, padding=center, groups=channels) - sharpened = sharpened.permute(0, 2, 3, 1) - - result = torch.clamp(sharpened, 0, 1) - - return (result,) - -NODE_CLASS_MAPPINGS = { - "Blend": Blend, - "Blur": Blur, - "Quantize": Quantize, - "Sharpen": Sharpen, -} +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image + +import comfy.utils + + +class Blend: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "blend_factor": ("FLOAT", { + "default": 0.5, + "min": 0.0, + "max": 1.0, + "step": 0.01 + }), + "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "blend_images" + + CATEGORY = "postprocessing" + + def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): + if image1.shape != image2.shape: + image2 = image2.permute(0, 3, 1, 2) + image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center') + image2 = image2.permute(0, 2, 3, 1) + + blended_image = self.blend_mode(image1, image2, blend_mode) + blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor + blended_image = torch.clamp(blended_image, 0, 1) + return (blended_image,) + + def blend_mode(self, img1, img2, mode): + if mode == "normal": + return img2 + elif mode == "multiply": + return img1 * img2 + elif mode == "screen": + return 1 - (1 - img1) * (1 - img2) + elif mode == "overlay": + return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2)) + elif mode == "soft_light": + 
return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1)) + else: + raise ValueError(f"Unsupported blend mode: {mode}") + + def g(self, x): + return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x)) + +class Blur: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "blur_radius": ("INT", { + "default": 1, + "min": 1, + "max": 31, + "step": 1 + }), + "sigma": ("FLOAT", { + "default": 1.0, + "min": 0.1, + "max": 10.0, + "step": 0.1 + }), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "blur" + + CATEGORY = "postprocessing" + + def gaussian_kernel(self, kernel_size: int, sigma: float): + x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size), torch.linspace(-1, 1, kernel_size), indexing="ij") + d = torch.sqrt(x * x + y * y) + g = torch.exp(-(d * d) / (2.0 * sigma * sigma)) + return g / g.sum() + + def blur(self, image: torch.Tensor, blur_radius: int, sigma: float): + if blur_radius == 0: + return (image,) + + batch_size, height, width, channels = image.shape + + kernel_size = blur_radius * 2 + 1 + kernel = self.gaussian_kernel(kernel_size, sigma).repeat(channels, 1, 1).unsqueeze(1) + + image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we use (B, H, W, C) + blurred = F.conv2d(image, kernel, padding=kernel_size // 2, groups=channels) + blurred = blurred.permute(0, 2, 3, 1) + + return (blurred,) + +class Quantize: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "colors": ("INT", { + "default": 256, + "min": 1, + "max": 256, + "step": 1 + }), + "dither": (["none", "floyd-steinberg"],), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "quantize" + + CATEGORY = "postprocessing" + + def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"): + batch_size, height, width, _ = image.shape + result = torch.zeros_like(image) + + dither_option = Image.Dither.FLOYDSTEINBERG if dither == "floyd-steinberg" else Image.Dither.NONE + + for b in range(batch_size): + tensor_image = image[b] + img = (tensor_image * 255).to(torch.uint8).numpy() + pil_image = Image.fromarray(img, mode='RGB') + + palette = pil_image.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836 + quantized_image = pil_image.quantize(colors=colors, palette=palette, dither=dither_option) + + quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255 + result[b] = quantized_array + + return (result,) + +class Sharpen: + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "sharpen_radius": ("INT", { + "default": 1, + "min": 1, + "max": 31, + "step": 1 + }), + "alpha": ("FLOAT", { + "default": 1.0, + "min": 0.1, + "max": 5.0, + "step": 0.1 + }), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "sharpen" + + CATEGORY = "postprocessing" + + def sharpen(self, image: torch.Tensor, sharpen_radius: int, alpha: float): + if sharpen_radius == 0: + return (image,) + + batch_size, height, width, channels = image.shape + + kernel_size = sharpen_radius * 2 + 1 + kernel = torch.ones((kernel_size, kernel_size), dtype=torch.float32) * -1 + center = kernel_size // 2 + kernel[center, center] = kernel_size**2 + kernel *= alpha + kernel = kernel.repeat(channels, 1, 1).unsqueeze(1) + + tensor_image = image.permute(0, 3, 1, 2) # Torch wants (B, C, H, W) we 
use (B, H, W, C) + sharpened = F.conv2d(tensor_image, kernel, padding=center, groups=channels) + sharpened = sharpened.permute(0, 2, 3, 1) + + result = torch.clamp(sharpened, 0, 1) + + return (result,) + +NODE_CLASS_MAPPINGS = { + "Blend": Blend, + "Blur": Blur, + "Quantize": Quantize, + "Sharpen": Sharpen, +} From de3d5f46ce0544339884fe454a59b342fcf28cf3 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 4 Apr 2023 19:32:49 -0600 Subject: [PATCH 20/62] Fix .graphdialog style --- web/style.css | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/web/style.css b/web/style.css index 9162bbba9..393d1667e 100644 --- a/web/style.css +++ b/web/style.css @@ -237,3 +237,28 @@ button.comfy-queue-btn { visibility:hidden } } + +.graphdialog { + min-height: 1em; +} + +.graphdialog .name { + font-size: 14px; + font-family: sans-serif; + color: #999999; +} + +.graphdialog button { + margin-top: unset; + vertical-align: unset; + height: 1.6em; + padding-right: 8px; +} + +.graphdialog input, .graphdialog textarea, .graphdialog select { + background-color: #222; + border: 2px solid; + border-color: #444444; + color: #ddd; + border-radius: 12px 0 0 12px; +} From bf7dbe4702ccfd02f92862238a8da3b6addc656b Mon Sep 17 00:00:00 2001 From: Adam Schwalm Date: Mon, 3 Apr 2023 20:05:46 -0500 Subject: [PATCH 21/62] Add left/right/escape hotkeys for image nodes --- web/scripts/app.js | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index 501c7ea65..1ecd4610f 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -102,6 +102,46 @@ class ComfyApp { }; } + #addNodeKeyHandler(node) { + const app = this; + const origNodeOnKeyDown = node.prototype.onKeyDown; + + node.prototype.onKeyDown = function(e) { + if (origNodeOnKeyDown && origNodeOnKeyDown.apply(this, e) === false) { + return false; + } + + if (this.flags.collapsed || !this.imgs || this.imageIndex === null) { + return; + } + + let handled = false; + + if (e.key === "ArrowLeft" || e.key === "ArrowRight") { + if (e.key === "ArrowLeft") { + this.imageIndex -= 1; + } else if (e.key === "ArrowRight") { + this.imageIndex += 1; + } + this.imageIndex %= this.imgs.length; + + if (this.imageIndex < 0) { + this.imageIndex = this.imgs.length + this.imageIndex; + } + handled = true; + } else if (e.key === "Escape") { + this.imageIndex = null; + handled = true; + } + + if (handled === true) { + e.preventDefault(); + e.stopImmediatePropagation(); + return false; + } + } + } + /** * Adds Custom drawing logic for nodes * e.g. 
Draws images and handles thumbnail navigation on nodes that output images @@ -785,6 +825,7 @@ class ComfyApp { this.#addNodeContextMenuHandler(node); this.#addDrawBackgroundHandler(node, app); + this.#addNodeKeyHandler(node); await this.#invokeExtensionsAsync("beforeRegisterNodeDef", node, nodeData); LiteGraph.registerNodeType(nodeId, node); From e46b1c3034a23eeb048e279d0d285737d39a4b1a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 4 Apr 2023 22:22:02 -0400 Subject: [PATCH 22/62] Disable xformers in VAE when xformers == 0.0.18 --- comfy/ldm/modules/diffusionmodules/model.py | 4 ++-- comfy/model_management.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 94f5510b9..788a6fc4f 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -9,7 +9,7 @@ from typing import Optional, Any from ldm.modules.attention import MemoryEfficientCrossAttention import model_management -if model_management.xformers_enabled(): +if model_management.xformers_enabled_vae(): import xformers import xformers.ops @@ -364,7 +364,7 @@ class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' - if model_management.xformers_enabled() and attn_type == "vanilla": + if model_management.xformers_enabled_vae() and attn_type == "vanilla": attn_type = "vanilla-xformers" if model_management.pytorch_attention_enabled() and attn_type == "vanilla": attn_type = "vanilla-pytorch" diff --git a/comfy/model_management.py b/comfy/model_management.py index 4aa47ff16..052dfb775 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -199,11 +199,25 @@ def get_autocast_device(dev): return dev.type return "cuda" + def xformers_enabled(): if vram_state == CPU: return False return XFORMERS_IS_AVAILBLE + +def xformers_enabled_vae(): + enabled = xformers_enabled() + if not enabled: + return False + try: + #0.0.18 has a bug where Nan is returned when inputs are too big (1152x1920 res images and above) + if xformers.version.__version__ == "0.0.18": + return False + except: + pass + return enabled + def pytorch_attention_enabled(): return ENABLE_PYTORCH_ATTENTION From 10ad4c1d17d8ea469565d904a8f47f1d2eeee459 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 4 Apr 2023 22:48:11 -0400 Subject: [PATCH 23/62] Move unclip stuff out of _for_testing --- nodes.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nodes.py b/nodes.py index 28beb37b1..935e28b84 100644 --- a/nodes.py +++ b/nodes.py @@ -197,7 +197,7 @@ class CheckpointLoader: RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" - CATEGORY = "loaders" + CATEGORY = "advanced/loaders" def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True): config_path = folder_paths.get_full_path("configs", config_name) @@ -227,7 +227,7 @@ class unCLIPCheckpointLoader: RETURN_TYPES = ("MODEL", "CLIP", "VAE", "CLIP_VISION") FUNCTION = "load_checkpoint" - CATEGORY = "_for_testing/unclip" + CATEGORY = "loaders" def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) @@ -450,7 +450,7 @@ class unCLIPConditioning: RETURN_TYPES = 
("CONDITIONING",) FUNCTION = "apply_adm" - CATEGORY = "_for_testing/unclip" + CATEGORY = "conditioning" def apply_adm(self, conditioning, clip_vision_output, strength, noise_augmentation): c = [] @@ -1038,7 +1038,6 @@ class ImagePadForOutpaint: NODE_CLASS_MAPPINGS = { "KSampler": KSampler, - "CheckpointLoader": CheckpointLoader, "CheckpointLoaderSimple": CheckpointLoaderSimple, "CLIPTextEncode": CLIPTextEncode, "CLIPSetLastLayer": CLIPSetLastLayer, @@ -1077,6 +1076,7 @@ NODE_CLASS_MAPPINGS = { "VAEEncodeTiled": VAEEncodeTiled, "TomePatchModel": TomePatchModel, "unCLIPCheckpointLoader": unCLIPCheckpointLoader, + "CheckpointLoader": CheckpointLoader, } def load_custom_node(module_path): From 871a76b77b9cafa8615da1cedeaafc1b10cf85e3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 4 Apr 2023 22:54:33 -0400 Subject: [PATCH 24/62] Rename and reorganize post processing nodes. --- comfy_extras/nodes_post_processing.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index de9ef0838..ba699e2b8 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -29,7 +29,7 @@ class Blend: RETURN_TYPES = ("IMAGE",) FUNCTION = "blend_images" - CATEGORY = "postprocessing" + CATEGORY = "image/postprocessing" def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): if image1.shape != image2.shape: @@ -86,7 +86,7 @@ class Blur: RETURN_TYPES = ("IMAGE",) FUNCTION = "blur" - CATEGORY = "postprocessing" + CATEGORY = "image/postprocessing" def gaussian_kernel(self, kernel_size: int, sigma: float): x, y = torch.meshgrid(torch.linspace(-1, 1, kernel_size), torch.linspace(-1, 1, kernel_size), indexing="ij") @@ -131,7 +131,7 @@ class Quantize: RETURN_TYPES = ("IMAGE",) FUNCTION = "quantize" - CATEGORY = "postprocessing" + CATEGORY = "image/postprocessing" def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"): batch_size, height, width, _ = image.shape @@ -179,7 +179,7 @@ class Sharpen: RETURN_TYPES = ("IMAGE",) FUNCTION = "sharpen" - CATEGORY = "postprocessing" + CATEGORY = "image/postprocessing" def sharpen(self, image: torch.Tensor, sharpen_radius: int, alpha: float): if sharpen_radius == 0: @@ -203,8 +203,8 @@ class Sharpen: return (result,) NODE_CLASS_MAPPINGS = { - "Blend": Blend, - "Blur": Blur, - "Quantize": Quantize, - "Sharpen": Sharpen, + "ImageBlend": Blend, + "ImageBlur": Blur, + "ImageQuantize": Quantize, + "ImageSharpen": Sharpen, } From 1b556ea9f43c4ead1235dfefd7d84e193667f6ef Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 4 Apr 2023 21:20:49 -0600 Subject: [PATCH 25/62] Add confirmation for clearing canvas --- web/scripts/ui.js | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 679f10b20..3f6308f24 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -115,6 +115,13 @@ function dragElement(dragEl, settings) { savePos = value; }, }); + + settings.addSetting({ + id: "Comfy.ConfirmClear", + name: "Require confirmation when clearing workflow", + type: "boolean", + defaultValue: false, + }); function dragMouseDown(e) { e = e || window.event; @@ -510,10 +517,16 @@ export class ComfyUI { $el("button", { textContent: "Load", onclick: () => fileInput.click() }), $el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }), $el("button", { textContent: "Clear", 
onclick: () => { - app.clean(); - app.graph.clear(); + if (localStorage.getItem("Comfy.Settings.Comfy.ConfirmClear") == "false" || confirm("Clear workflow?")) { + app.clean(); + app.graph.clear(); + } + }}), + $el("button", { textContent: "Load Default", onclick: () => { + if (localStorage.getItem("Comfy.Settings.Comfy.ConfirmClear") == "false" || confirm("Load default workflow?")) { + app.loadGraphData() + } }}), - $el("button", { textContent: "Load Default", onclick: () => app.loadGraphData() }), ]); dragElement(this.menuContainer, this.settings); From 30f274bf48419d98f646211df30fe9e074a28a66 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 4 Apr 2023 21:53:02 -0600 Subject: [PATCH 26/62] Make the default true --- web/scripts/ui.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 3f6308f24..91821fac0 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -120,7 +120,7 @@ function dragElement(dragEl, settings) { id: "Comfy.ConfirmClear", name: "Require confirmation when clearing workflow", type: "boolean", - defaultValue: false, + defaultValue: true, }); function dragMouseDown(e) { From a595c56872309e310fed7bb877bcd7caee8ef563 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 4 Apr 2023 22:03:22 -0600 Subject: [PATCH 27/62] Remove menu drag handle --- web/scripts/ui.js | 3 +-- web/style.css | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 68bfc792a..df0d8b4a3 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -414,8 +414,7 @@ export class ComfyUI { }); this.menuContainer = $el("div.comfy-menu", { parent: document.body }, [ - $el("div", { style: { overflow: "hidden", position: "relative", width: "100%" } }, [ - $el("span.drag-handle"), + $el("div.drag-handle", { style: { overflow: "hidden", position: "relative", width: "100%", cursor: "default" } }, [ $el("span", { $: (q) => (this.queueSize = q) }), $el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }), ]), diff --git a/web/style.css b/web/style.css index 393d1667e..1263c6648 100644 --- a/web/style.css +++ b/web/style.css @@ -105,7 +105,7 @@ body { background-color: #353535; font-family: sans-serif; padding: 10px; - border-radius: 0 8px 8px 8px; + border-radius: 8px; box-shadow: 3px 3px 8px rgba(0, 0, 0, 0.4); } From 8af2fe1e8747e142e133640659187136eb330d0f Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 4 Apr 2023 22:10:45 -0600 Subject: [PATCH 28/62] Remove redundant lines --- web/style.css | 3 --- 1 file changed, 3 deletions(-) diff --git a/web/style.css b/web/style.css index 1263c6648..c04b40ec4 100644 --- a/web/style.css +++ b/web/style.css @@ -88,13 +88,10 @@ body { } .comfy-menu { - width: 200px; font-size: 15px; position: absolute; top: 50%; right: 0%; - background-color: white; - color: #000; text-align: center; z-index: 100; width: 170px; From 623afa2ced69085d7996921a0d312968a448109b Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 5 Apr 2023 10:51:04 +0100 Subject: [PATCH 29/62] Made accessing setting value easier Updated clear check to use this --- web/scripts/ui.js | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 91821fac0..aea1a94b8 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -115,14 +115,6 @@ function dragElement(dragEl, settings) { savePos = value; }, }); - - 
settings.addSetting({ - id: "Comfy.ConfirmClear", - name: "Require confirmation when clearing workflow", - type: "boolean", - defaultValue: true, - }); - function dragMouseDown(e) { e = e || window.event; e.preventDefault(); @@ -289,6 +281,16 @@ class ComfySettingsDialog extends ComfyDialog { return element; }, }); + + const self = this; + return { + get value() { + return self.getSettingValue(id); + }, + set value(v) { + self.setSettingValue(id, value); + }, + }; } show() { @@ -410,6 +412,13 @@ export class ComfyUI { this.history.update(); }); + const confirmClear = this.settings.addSetting({ + id: "Comfy.ConfirmClear", + name: "Require confirmation when clearing workflow", + type: "boolean", + defaultValue: true, + }); + const fileInput = $el("input", { type: "file", accept: ".json,image/png", @@ -517,13 +526,13 @@ export class ComfyUI { $el("button", { textContent: "Load", onclick: () => fileInput.click() }), $el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }), $el("button", { textContent: "Clear", onclick: () => { - if (localStorage.getItem("Comfy.Settings.Comfy.ConfirmClear") == "false" || confirm("Clear workflow?")) { + if (!confirmClear.value || confirm("Clear workflow?")) { app.clean(); app.graph.clear(); } }}), $el("button", { textContent: "Load Default", onclick: () => { - if (localStorage.getItem("Comfy.Settings.Comfy.ConfirmClear") == "false" || confirm("Load default workflow?")) { + if (!confirmClear.value || confirm("Load default workflow?")) { app.loadGraphData() } }}), From db16932be5eec5446fbae898ca1365bfae58d90a Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 5 Apr 2023 10:52:35 +0100 Subject: [PATCH 30/62] Fix setting --- web/scripts/ui.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index aea1a94b8..9952606d4 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -288,7 +288,7 @@ class ComfySettingsDialog extends ComfyDialog { return self.getSettingValue(id); }, set value(v) { - self.setSettingValue(id, value); + self.setSettingValue(id, v); }, }; } From 1030ab0d8fd91e5c1167a087397047603102f069 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 5 Apr 2023 11:02:34 +0100 Subject: [PATCH 31/62] Reload setting value --- web/scripts/ui.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 9952606d4..b6b8e06b2 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -225,6 +225,7 @@ class ComfySettingsDialog extends ComfyDialog { }; let element; + value = this.getSettingValue(id, defaultValue); if (typeof type === "function") { element = type(name, setter, value, attrs); @@ -418,7 +419,7 @@ export class ComfyUI { type: "boolean", defaultValue: true, }); - + const fileInput = $el("input", { type: "file", accept: ".json,image/png", From 37713e3b0acfc576f4eafc0b47582374ab5987dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Wed, 5 Apr 2023 21:22:14 +0800 Subject: [PATCH 32/62] Add basic XPU device support closed #387 --- comfy/model_management.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 052dfb775..f0b8be55e 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -5,6 +5,7 @@ LOW_VRAM = 2 NORMAL_VRAM = 3 HIGH_VRAM = 4 MPS = 5 
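+# XPU: Intel GPUs, driven through intel_extension_for_pytorch (imported further down)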
+XPU = 6 accelerate_enabled = False vram_state = NORMAL_VRAM @@ -85,10 +86,17 @@ try: except: pass +try: + import intel_extension_for_pytorch + if torch.xpu.is_available(): + vram_state = XPU +except: + pass + if forced_cpu: vram_state = CPU -print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state]) +print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS", "XPU"][vram_state]) current_loaded_model = None @@ -141,6 +149,9 @@ def load_model_gpu(model): mps_device = torch.device("mps") real_model.to(mps_device) pass + elif vram_state == XPU: + real_model.to("xpu") + pass elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM: model_accelerated = False real_model.cuda() @@ -189,6 +200,8 @@ def unload_if_low_vram(model): def get_torch_device(): if vram_state == MPS: return torch.device("mps") + if vram_state == XPU: + return torch.device("xpu") if vram_state == CPU: return torch.device("cpu") else: @@ -228,6 +241,9 @@ def get_free_memory(dev=None, torch_free_too=False): if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'): mem_free_total = psutil.virtual_memory().available mem_free_torch = mem_free_total + elif hasattr(dev, 'type') and (dev.type == 'xpu'): + mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) + mem_free_torch = mem_free_total else: stats = torch.cuda.memory_stats(dev) mem_active = stats['active_bytes.all.current'] @@ -258,8 +274,12 @@ def mps_mode(): global vram_state return vram_state == MPS +def xpu_mode(): + global vram_state + return vram_state == XPU + def should_use_fp16(): - if cpu_mode() or mps_mode(): + if cpu_mode() or mps_mode() or xpu_mode(): return False #TODO ? if torch.cuda.is_bf16_supported(): From 1ced2bdd2da9a13caf72d7bff36d7f645f443fc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Wed, 5 Apr 2023 21:25:37 +0800 Subject: [PATCH 33/62] Specify safetensors version to avoid upstream errors https://github.com/huggingface/safetensors/issues/142 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3b4040a29..0527b31df 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ torchsde einops open-clip-torch transformers>=4.25.1 -safetensors +safetensors>=0.3.0 pytorch_lightning aiohttp accelerate From 3536a7c8d148f738d30a375eab859c74da91a25a Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Wed, 5 Apr 2023 08:57:44 -0600 Subject: [PATCH 34/62] Put drag icon back --- web/scripts/ui.js | 1 + web/style.css | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index df0d8b4a3..621ca70ee 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -415,6 +415,7 @@ export class ComfyUI { this.menuContainer = $el("div.comfy-menu", { parent: document.body }, [ $el("div.drag-handle", { style: { overflow: "hidden", position: "relative", width: "100%", cursor: "default" } }, [ + $el("span.drag-handle"), $el("span", { $: (q) => (this.queueSize = q) }), $el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }), ]), diff --git a/web/style.css b/web/style.css index c04b40ec4..f2dd4e956 100644 --- a/web/style.css +++ b/web/style.css @@ -102,7 +102,7 @@ body { background-color: #353535; font-family: sans-serif; padding: 10px; - border-radius: 8px; + border-radius: 0 8px 8px 8px; box-shadow: 3px 3px 8px 
rgba(0, 0, 0, 0.4); } From f816964847d557d2ec94cf52531c43f91751cc28 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 5 Apr 2023 14:01:01 -0400 Subject: [PATCH 35/62] Add a way to set output directory with --output-directory --- folder_paths.py | 34 ++++++++++++++++++++++++++++++++++ main.py | 9 +++++++++ nodes.py | 29 ++++++++++++++--------------- server.py | 6 +++--- 4 files changed, 60 insertions(+), 18 deletions(-) diff --git a/folder_paths.py b/folder_paths.py index af56a6da1..f13e4895f 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -27,6 +27,40 @@ folder_names_and_paths["embeddings"] = ([os.path.join(models_dir, "embeddings")] folder_names_and_paths["controlnet"] = ([os.path.join(models_dir, "controlnet"), os.path.join(models_dir, "t2i_adapter")], supported_pt_extensions) folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions) +output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") +temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") +input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") + +if not os.path.exists(input_directory): + os.makedirs(input_directory) + +def set_output_directory(output_dir): + global output_directory + output_directory = output_dir + +def get_output_directory(): + global output_directory + return output_directory + +def get_temp_directory(): + global temp_directory + return temp_directory + +def get_input_directory(): + global input_directory + return input_directory + + +#NOTE: used in http server so don't put folders that should not be accessed remotely +def get_directory_by_type(type_name): + if type_name == "output": + return get_output_directory() + if type_name == "temp": + return get_temp_directory() + if type_name == "input": + return get_input_directory() + return None + def add_model_folder_path(folder_name, full_folder_path): global folder_names_and_paths diff --git a/main.py b/main.py index fbfaf6be5..a3549b86f 100644 --- a/main.py +++ b/main.py @@ -17,6 +17,7 @@ if __name__ == "__main__": print("\t--port 8188\t\t\tSet the listen port.") print() print("\t--extra-model-paths-config file.yaml\tload an extra_model_paths.yaml file.") + print("\t--output-directory path/to/output\tSet the ComfyUI output directory.") print() print() print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n") @@ -134,6 +135,14 @@ if __name__ == "__main__": for i in indices: load_extra_path_config(sys.argv[i]) + try: + output_dir = sys.argv[sys.argv.index('--output-directory') + 1] + output_dir = os.path.abspath(output_dir) + print("setting output directory to:", output_dir) + folder_paths.set_output_directory(output_dir) + except: + pass + port = 8188 try: p_index = sys.argv.index('--port') diff --git a/nodes.py b/nodes.py index 935e28b84..187d54a11 100644 --- a/nodes.py +++ b/nodes.py @@ -777,7 +777,7 @@ class KSamplerAdvanced: class SaveImage: def __init__(self): - self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") + self.output_dir = folder_paths.get_output_directory() self.type = "output" @classmethod @@ -829,9 +829,6 @@ class SaveImage: os.makedirs(full_output_folder, exist_ok=True) counter = 1 - if not os.path.exists(self.output_dir): - os.makedirs(self.output_dir) - results = list() for image in images: i = 255. 
* image.cpu().numpy() @@ -856,7 +853,7 @@ class SaveImage: class PreviewImage(SaveImage): def __init__(self): - self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") + self.output_dir = folder_paths.get_temp_directory() self.type = "temp" @classmethod @@ -867,13 +864,11 @@ class PreviewImage(SaveImage): } class LoadImage: - input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") @classmethod def INPUT_TYPES(s): - if not os.path.exists(s.input_dir): - os.makedirs(s.input_dir) + input_dir = folder_paths.get_input_directory() return {"required": - {"image": (sorted(os.listdir(s.input_dir)), )}, + {"image": (sorted(os.listdir(input_dir)), )}, } CATEGORY = "image" @@ -881,7 +876,8 @@ class LoadImage: RETURN_TYPES = ("IMAGE", "MASK") FUNCTION = "load_image" def load_image(self, image): - image_path = os.path.join(self.input_dir, image) + input_dir = folder_paths.get_input_directory() + image_path = os.path.join(input_dir, image) i = Image.open(image_path) image = i.convert("RGB") image = np.array(image).astype(np.float32) / 255.0 @@ -895,18 +891,19 @@ class LoadImage: @classmethod def IS_CHANGED(s, image): - image_path = os.path.join(s.input_dir, image) + input_dir = folder_paths.get_input_directory() + image_path = os.path.join(input_dir, image) m = hashlib.sha256() with open(image_path, 'rb') as f: m.update(f.read()) return m.digest().hex() class LoadImageMask: - input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") @classmethod def INPUT_TYPES(s): + input_dir = folder_paths.get_input_directory() return {"required": - {"image": (sorted(os.listdir(s.input_dir)), ), + {"image": (sorted(os.listdir(input_dir)), ), "channel": (["alpha", "red", "green", "blue"], ),} } @@ -915,7 +912,8 @@ class LoadImageMask: RETURN_TYPES = ("MASK",) FUNCTION = "load_image" def load_image(self, image, channel): - image_path = os.path.join(self.input_dir, image) + input_dir = folder_paths.get_input_directory() + image_path = os.path.join(input_dir, image) i = Image.open(image_path) mask = None c = channel[0].upper() @@ -930,7 +928,8 @@ class LoadImageMask: @classmethod def IS_CHANGED(s, image, channel): - image_path = os.path.join(s.input_dir, image) + input_dir = folder_paths.get_input_directory() + image_path = os.path.join(input_dir, image) m = hashlib.sha256() with open(image_path, 'rb') as f: m.update(f.read()) diff --git a/server.py b/server.py index 963daefff..840d9a4e7 100644 --- a/server.py +++ b/server.py @@ -89,7 +89,7 @@ class PromptServer(): @routes.post("/upload/image") async def upload_image(request): - upload_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") + upload_dir = folder_paths.get_input_directory() if not os.path.exists(upload_dir): os.makedirs(upload_dir) @@ -122,10 +122,10 @@ class PromptServer(): async def view_image(request): if "filename" in request.rel_url.query: type = request.rel_url.query.get("type", "output") - if type not in ["output", "input", "temp"]: + output_dir = folder_paths.get_directory_by_type(type) + if output_dir is None: return web.Response(status=400) - output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), type) if "subfolder" in request.rel_url.query: full_output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"]) if os.path.commonpath((os.path.abspath(full_output_dir), output_dir)) != output_dir: From 5456b7555c6cc40a302ac9404603bfdf9c08f95c Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> 
Date: Wed, 5 Apr 2023 19:58:06 +0100 Subject: [PATCH 36/62] Add missing defaultValue arg --- web/scripts/ui.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index b6b8e06b2..3af29ba73 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -286,7 +286,7 @@ class ComfySettingsDialog extends ComfyDialog { const self = this; return { get value() { - return self.getSettingValue(id); + return self.getSettingValue(id, defaultValue); }, set value(v) { self.setSettingValue(id, v); From 1a74611c6e725f1ffb6629d08fbd04bb658f2704 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Wed, 5 Apr 2023 15:56:41 -0600 Subject: [PATCH 37/62] Style modals to match rest of UI --- web/scripts/ui.js | 32 +++++++++++++-------- web/style.css | 71 +++++++++++++++++++++++------------------------ 2 files changed, 55 insertions(+), 48 deletions(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 91821fac0..4ef24e007 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -115,14 +115,6 @@ function dragElement(dragEl, settings) { savePos = value; }, }); - - settings.addSetting({ - id: "Comfy.ConfirmClear", - name: "Require confirmation when clearing workflow", - type: "boolean", - defaultValue: true, - }); - function dragMouseDown(e) { e = e || window.event; e.preventDefault(); @@ -170,7 +162,7 @@ class ComfyDialog { $el("p", { $: (p) => (this.textElement = p) }), $el("button", { type: "button", - textContent: "CLOSE", + textContent: "Close", onclick: () => this.close(), }), ]), @@ -233,6 +225,7 @@ class ComfySettingsDialog extends ComfyDialog { }; let element; + value = this.getSettingValue(id, defaultValue); if (typeof type === "function") { element = type(name, setter, value, attrs); @@ -289,6 +282,16 @@ class ComfySettingsDialog extends ComfyDialog { return element; }, }); + + const self = this; + return { + get value() { + return self.getSettingValue(id, defaultValue); + }, + set value(v) { + self.setSettingValue(id, v); + }, + }; } show() { @@ -410,6 +413,13 @@ export class ComfyUI { this.history.update(); }); + const confirmClear = this.settings.addSetting({ + id: "Comfy.ConfirmClear", + name: "Require confirmation when clearing workflow", + type: "boolean", + defaultValue: true, + }); + const fileInput = $el("input", { type: "file", accept: ".json,image/png", @@ -517,13 +527,13 @@ export class ComfyUI { $el("button", { textContent: "Load", onclick: () => fileInput.click() }), $el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }), $el("button", { textContent: "Clear", onclick: () => { - if (localStorage.getItem("Comfy.Settings.Comfy.ConfirmClear") == "false" || confirm("Clear workflow?")) { + if (!confirmClear.value || confirm("Clear workflow?")) { app.clean(); app.graph.clear(); } }}), $el("button", { textContent: "Load Default", onclick: () => { - if (localStorage.getItem("Comfy.Settings.Comfy.ConfirmClear") == "false" || confirm("Load default workflow?")) { + if (!confirmClear.value || confirm("Load default workflow?")) { app.loadGraphData() } }}), diff --git a/web/style.css b/web/style.css index 393d1667e..d347bd454 100644 --- a/web/style.css +++ b/web/style.css @@ -39,18 +39,19 @@ body { position: fixed; /* Stay in place */ z-index: 100; /* Sit on top */ padding: 30px 30px 10px 30px; - background-color: #ff0000; /* Modal background */ + background-color: #353535; /* Modal background */ + color: #ff4444; box-shadow: 0px 0px 20px #888888; border-radius: 10px; - text-align: center; top: 50%; left: 50%; max-width: 
80vw; max-height: 80vh; transform: translate(-50%, -50%); overflow: hidden; - min-width: 60%; justify-content: center; + font-family: monospace; + font-size: 15px; } .comfy-modal-content { @@ -70,23 +71,6 @@ body { margin: 3px 3px 3px 4px; } -.comfy-modal button { - cursor: pointer; - color: #aaaaaa; - border: none; - background-color: transparent; - font-size: 24px; - font-weight: bold; - width: 100%; -} - -.comfy-modal button:hover, -.comfy-modal button:focus { - color: #000; - text-decoration: none; - cursor: pointer; -} - .comfy-menu { width: 200px; font-size: 15px; @@ -109,7 +93,8 @@ body { box-shadow: 3px 3px 8px rgba(0, 0, 0, 0.4); } -.comfy-menu button { +.comfy-menu button, +.comfy-modal button { font-size: 20px; } @@ -130,7 +115,8 @@ body { .comfy-menu > button, .comfy-menu-btns button, -.comfy-menu .comfy-list button { +.comfy-menu .comfy-list button, +.comfy-modal button{ color: #ddd; background-color: #222; border-radius: 8px; @@ -220,11 +206,22 @@ button.comfy-queue-btn { } .comfy-modal.comfy-settings { - background-color: var(--bg-color); - color: var(--fg-color); + text-align: center; + font-family: sans-serif; + color: #999; z-index: 99; } +.comfy-modal input, +.comfy-modal select { + color: #ddd; + background-color: #222; + border-radius: 8px; + border-color: #4e4e4e; + border-style: solid; + font-size: inherit; +} + @media only screen and (max-height: 850px) { .comfy-menu { top: 0 !important; @@ -239,26 +236,26 @@ button.comfy-queue-btn { } .graphdialog { - min-height: 1em; + min-height: 1em; } .graphdialog .name { - font-size: 14px; - font-family: sans-serif; - color: #999999; + font-size: 14px; + font-family: sans-serif; + color: #999999; } .graphdialog button { - margin-top: unset; - vertical-align: unset; - height: 1.6em; - padding-right: 8px; + margin-top: unset; + vertical-align: unset; + height: 1.6em; + padding-right: 8px; } .graphdialog input, .graphdialog textarea, .graphdialog select { - background-color: #222; - border: 2px solid; - border-color: #444444; - color: #ddd; - border-radius: 12px 0 0 12px; + background-color: #222; + border: 2px solid; + border-color: #444444; + color: #ddd; + border-radius: 12px 0 0 12px; } From dd29966f8a2973529ea50de2ef3d0e7c72b5114e Mon Sep 17 00:00:00 2001 From: EllangoK Date: Wed, 5 Apr 2023 20:32:59 -0400 Subject: [PATCH 38/62] changes main.py to use argparse --- main.py | 118 ++++++++++++++++++++++---------------------------------- 1 file changed, 47 insertions(+), 71 deletions(-) diff --git a/main.py b/main.py index a3549b86f..20c8a49e8 100644 --- a/main.py +++ b/main.py @@ -1,57 +1,54 @@ -import os -import sys -import shutil - -import threading +import argparse import asyncio +import os +import shutil +import sys +import threading if os.name == "nt": import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": - if '--help' in sys.argv: - print() - print("Valid Command line Arguments:") - print("\t--listen [ip]\t\t\tListen on ip or 0.0.0.0 if none given so the UI can be accessed from other computers.") - print("\t--port 8188\t\t\tSet the listen port.") - print() - print("\t--extra-model-paths-config file.yaml\tload an extra_model_paths.yaml file.") - print("\t--output-directory path/to/output\tSet the ComfyUI output directory.") - print() - print() - print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n") - 
print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.") - print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.") - print("\t--disable-xformers\t\tdisables xformers") - print("\t--cuda-device 1\t\tSet the id of the cuda device this instance will use.") - print() - print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n") - print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.") - print("\t--lowvram\t\t\tSplit the unet in parts to use less vram.") - print("\t--novram\t\t\tWhen lowvram isn't enough.") - print() - print("\t--cpu\t\t\tTo use the CPU for everything (slow).") - exit() + parser = argparse.ArgumentParser(description="Script Arguments") - if '--dont-upcast-attention' in sys.argv: + parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 0.0.0.0 if none given so the UI can be accessed from other computers.") + parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") + parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") + parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") + parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") + parser.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.") + parser.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") + parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") + parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") + parser.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.")
+    parser.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.")
+    parser.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.")
+    parser.add_argument("--novram", action="store_true", help="When lowvram isn't enough.")
+    parser.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")
+    parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
+    parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
+    parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.")
+
+    args = parser.parse_args()
+
+    if args.dont_upcast_attention:
         print("disabling upcasting of attention")
         os.environ['ATTN_PRECISION'] = "fp16"
 
-    try:
-        index = sys.argv.index('--cuda-device')
-        device = sys.argv[index + 1]
-        os.environ['CUDA_VISIBLE_DEVICES'] = device
-        print("Set cuda device to:", device)
-    except:
-        pass
+    if args.cuda_device is not None:
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
+        print("Set cuda device to:", args.cuda_device)
+
 
-from nodes import init_custom_nodes
-import execution
-import server
-import folder_paths
 import yaml
 
+import execution
+import folder_paths
+import server
+from nodes import init_custom_nodes
+
+
 def prompt_worker(q, server):
     e = execution.PromptExecutor(server)
     while True:
@@ -110,51 +107,30 @@ if __name__ == "__main__":
     hijack_progress(server)
 
     threading.Thread(target=prompt_worker, daemon=True, args=(q,server,)).start()
 
-    try:
-        address = '0.0.0.0'
-        p_index = sys.argv.index('--listen')
-        try:
-            ip = sys.argv[p_index + 1]
-            if ip[:2] != '--':
-                address = ip
-        except:
-            pass
-    except:
-        address = '127.0.0.1'
 
-    dont_print = False
-    if '--dont-print-server' in sys.argv:
-        dont_print = True
+
+    address = args.listen
+
+    dont_print = args.dont_print_server
 
     extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
     if os.path.isfile(extra_model_paths_config_path):
        load_extra_path_config(extra_model_paths_config_path)
 
-    if '--extra-model-paths-config' in sys.argv:
-        indices = [(i + 1) for i in range(len(sys.argv) - 1) if sys.argv[i] == '--extra-model-paths-config']
-        for i in indices:
-            load_extra_path_config(sys.argv[i])
+    if args.extra_model_paths_config:
+        load_extra_path_config(args.extra_model_paths_config)
 
-    try:
-        output_dir = sys.argv[sys.argv.index('--output-directory') + 1]
-        output_dir = os.path.abspath(output_dir)
+    if args.output_directory:
+        output_dir = os.path.abspath(args.output_directory)
         print("setting output directory to:", output_dir)
         folder_paths.set_output_directory(output_dir)
-    except:
-        pass
 
-    port = 8188
-    try:
-        p_index = sys.argv.index('--port')
-        port = int(sys.argv[p_index + 1])
-    except:
-        pass
+    port = args.port
 
-    if '--quick-test-for-ci' in sys.argv:
+    if args.quick_test_for_ci:
         exit(0)
 
     call_on_start = None
-    if "--windows-standalone-build" in sys.argv:
+    if args.windows_standalone_build:
         def startup_server(address, port):
             import webbrowser
             webbrowser.open("http://{}:{}".format(address, port))

From e5e587b1c0c5dc728d65b3e84592445cdb5e6e9b Mon Sep 17 00:00:00 2001
From: EllangoK
Date: Wed, 5 Apr 2023 23:41:23 -0400
Subject: [PATCH 39/62] separates out arg parser and imports args

---
 comfy/cli_args.py              | 29 +++++++++
 comfy/ldm/modules/attention.py |  5 +-
 comfy/model_management.py      | 111
++++++++++++++++----------------- main.py | 27 +------- 4 files changed, 88 insertions(+), 84 deletions(-) create mode 100644 comfy/cli_args.py diff --git a/comfy/cli_args.py b/comfy/cli_args.py new file mode 100644 index 000000000..6a56e315c --- /dev/null +++ b/comfy/cli_args.py @@ -0,0 +1,29 @@ +import argparse + +parser = argparse.ArgumentParser() + +parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 127.0.0.1 if none given so the UI can be accessed from other computers.") +parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") +parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") +parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") +parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") + +attn_group = parser.add_mutually_exclusive_group() +attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.") +attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") + +parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") +parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") + +vram_group = parser.add_mutually_exclusive_group() +vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.") +vram_group.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.") +vram_group.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.") +vram_group.add_argument("--novram", action="store_true", help="When lowvram isn't enough.") +vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).") + +parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") +parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") +parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.") + +args = parser.parse_args() diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 07553627c..92b3eca7c 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -21,6 +21,8 @@ if model_management.xformers_enabled(): import os _ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32") +from cli_args import args + def exists(val): return val is not None @@ -474,7 +476,6 @@ class CrossAttentionPytorch(nn.Module): return self.to_out(out) -import sys if model_management.xformers_enabled(): print("Using xformers cross attention") CrossAttention = MemoryEfficientCrossAttention @@ -482,7 +483,7 @@ elif model_management.pytorch_attention_enabled(): print("Using pytorch cross attention") CrossAttention = CrossAttentionPytorch else: - if "--use-split-cross-attention" in sys.argv: + if args.use_split_cross_attention: print("Using split optimization for cross attention") CrossAttention = CrossAttentionDoggettx else: diff --git a/comfy/model_management.py b/comfy/model_management.py index 052dfb775..7dda073dc 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1,36 +1,35 @@ +import psutil +from enum import Enum +from cli_args import args -CPU = 0 -NO_VRAM = 1 -LOW_VRAM = 2 -NORMAL_VRAM = 3 -HIGH_VRAM = 4 -MPS = 5 +class VRAMState(Enum): + CPU = 0 + NO_VRAM = 1 + LOW_VRAM = 2 + NORMAL_VRAM = 3 + HIGH_VRAM = 4 + MPS = 5 -accelerate_enabled = False -vram_state = NORMAL_VRAM +# Determine VRAM State +vram_state = VRAMState.NORMAL_VRAM +set_vram_to = VRAMState.NORMAL_VRAM total_vram = 0 total_vram_available_mb = -1 -import sys -import psutil - -forced_cpu = "--cpu" in sys.argv - -set_vram_to = NORMAL_VRAM +accelerate_enabled = False try: import torch total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) - forced_normal_vram = "--normalvram" in sys.argv - if not forced_normal_vram and not forced_cpu: + if not args.normalvram and not args.cpu: if total_vram <= 4096: print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram") - set_vram_to = LOW_VRAM + set_vram_to = VRAMState.LOW_VRAM elif total_vram > total_ram * 1.1 and total_vram > 14336: print("Enabling highvram mode because your GPU has more vram than your computer has ram. 
If you don't want this use: --normalvram") - vram_state = HIGH_VRAM + vram_state = VRAMState.HIGH_VRAM except: pass @@ -39,34 +38,32 @@ try: except: OOM_EXCEPTION = Exception -if "--disable-xformers" in sys.argv: - XFORMERS_IS_AVAILBLE = False +if args.disable_xformers: + XFORMERS_IS_AVAILABLE = False else: try: import xformers import xformers.ops - XFORMERS_IS_AVAILBLE = True + XFORMERS_IS_AVAILABLE = True except: - XFORMERS_IS_AVAILBLE = False + XFORMERS_IS_AVAILABLE = False -ENABLE_PYTORCH_ATTENTION = False -if "--use-pytorch-cross-attention" in sys.argv: +ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention +if ENABLE_PYTORCH_ATTENTION: torch.backends.cuda.enable_math_sdp(True) torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp(True) - ENABLE_PYTORCH_ATTENTION = True - XFORMERS_IS_AVAILBLE = False + XFORMERS_IS_AVAILABLE = False + +if args.lowvram: + set_vram_to = VRAMState.LOW_VRAM +elif args.novram: + set_vram_to = VRAMState.NO_VRAM +elif args.highvram: + vram_state = VRAMState.HIGH_VRAM -if "--lowvram" in sys.argv: - set_vram_to = LOW_VRAM -if "--novram" in sys.argv: - set_vram_to = NO_VRAM -if "--highvram" in sys.argv: - vram_state = HIGH_VRAM - - -if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM: +if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM): try: import accelerate accelerate_enabled = True @@ -81,14 +78,14 @@ if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM: try: if torch.backends.mps.is_available(): - vram_state = MPS + vram_state = VRAMState.MPS except: pass -if forced_cpu: - vram_state = CPU +if args.cpu: + vram_state = VRAMState.CPU -print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state]) +print(f"Set vram state to: {vram_state.name}") current_loaded_model = None @@ -109,12 +106,12 @@ def unload_model(): model_accelerated = False #never unload models from GPU on high vram - if vram_state != HIGH_VRAM: + if vram_state != VRAMState.HIGH_VRAM: current_loaded_model.model.cpu() current_loaded_model.unpatch_model() current_loaded_model = None - if vram_state != HIGH_VRAM: + if vram_state != VRAMState.HIGH_VRAM: if len(current_gpu_controlnets) > 0: for n in current_gpu_controlnets: n.cpu() @@ -135,19 +132,19 @@ def load_model_gpu(model): model.unpatch_model() raise e current_loaded_model = model - if vram_state == CPU: + if vram_state == VRAMState.CPU: pass - elif vram_state == MPS: + elif vram_state == VRAMState.MPS: mps_device = torch.device("mps") real_model.to(mps_device) pass - elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM: + elif vram_state == VRAMState.NORMAL_VRAM or vram_state == VRAMState.HIGH_VRAM: model_accelerated = False real_model.cuda() else: - if vram_state == NO_VRAM: + if vram_state == VRAMState.NO_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"}) - elif vram_state == LOW_VRAM: + elif vram_state == VRAMState.LOW_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"}) accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda") @@ -157,10 +154,10 @@ def load_model_gpu(model): def load_controlnet_gpu(models): global current_gpu_controlnets global vram_state - if vram_state == CPU: + if vram_state == VRAMState.CPU: return - if vram_state == LOW_VRAM or vram_state == NO_VRAM: + if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: #don't load controlnets like this if low 
vram because they will be loaded right before running and unloaded right after return @@ -176,20 +173,20 @@ def load_controlnet_gpu(models): def load_if_low_vram(model): global vram_state - if vram_state == LOW_VRAM or vram_state == NO_VRAM: + if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: return model.cuda() return model def unload_if_low_vram(model): global vram_state - if vram_state == LOW_VRAM or vram_state == NO_VRAM: + if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: return model.cpu() return model def get_torch_device(): - if vram_state == MPS: + if vram_state == VRAMState.MPS: return torch.device("mps") - if vram_state == CPU: + if vram_state == VRAMState.CPU: return torch.device("cpu") else: return torch.cuda.current_device() @@ -201,9 +198,9 @@ def get_autocast_device(dev): def xformers_enabled(): - if vram_state == CPU: + if vram_state == VRAMState.CPU: return False - return XFORMERS_IS_AVAILBLE + return XFORMERS_IS_AVAILABLE def xformers_enabled_vae(): @@ -243,7 +240,7 @@ def get_free_memory(dev=None, torch_free_too=False): def maximum_batch_area(): global vram_state - if vram_state == NO_VRAM: + if vram_state == VRAMState.NO_VRAM: return 0 memory_free = get_free_memory() / (1024 * 1024) @@ -252,11 +249,11 @@ def maximum_batch_area(): def cpu_mode(): global vram_state - return vram_state == CPU + return vram_state == VRAMState.CPU def mps_mode(): global vram_state - return vram_state == MPS + return vram_state == VRAMState.MPS def should_use_fp16(): if cpu_mode() or mps_mode(): diff --git a/main.py b/main.py index 20c8a49e8..51a48fc6d 100644 --- a/main.py +++ b/main.py @@ -1,37 +1,14 @@ -import argparse import asyncio import os import shutil -import sys import threading +from comfy.cli_args import args if os.name == "nt": import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Script Arguments") - - parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 0.0.0.0 if none given so the UI can be accessed from other computers.") - parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") - parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") - parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") - parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") - parser.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.") - parser.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") - parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") - parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") - parser.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.") - parser.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.") - parser.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.") - parser.add_argument("--novram", action="store_true", help="When lowvram isn't enough.") - parser.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).") - parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") - parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") - parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.") - - args = parser.parse_args() - if args.dont_upcast_attention: print("disabling upcasting of attention") os.environ['ATTN_PRECISION'] = "fp16" @@ -121,7 +98,7 @@ if __name__ == "__main__": if args.output_directory: output_dir = os.path.abspath(args.output_directory) - print("setting output directory to:", output_dir) + print(f"Setting output directory to: {output_dir}") folder_paths.set_output_directory(output_dir) port = args.port From 84b9c0ac2ff49b5b18b8e7804f8fe42a379a0787 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Thu, 6 Apr 2023 12:27:22 +0800 Subject: [PATCH 40/62] Import intel_extension_for_pytorch as ipex --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index f0b8be55e..379cc18d7 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -87,7 +87,7 @@ except: pass try: - import intel_extension_for_pytorch + import intel_extension_for_pytorch as ipex if torch.xpu.is_available(): vram_state = XPU except: From 7cb924f68469cd2481b2313f8e5fc02587279bf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Thu, 6 Apr 2023 14:24:47 +0800 Subject: [PATCH 41/62] Use separate variables instead of `vram_state` --- comfy/model_management.py | 70 +++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 379cc18d7..a84167746 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -5,9 +5,9 @@ LOW_VRAM = 2 NORMAL_VRAM = 3 HIGH_VRAM = 4 MPS = 5 -XPU = 6 accelerate_enabled = False +xpu_available = False vram_state = NORMAL_VRAM total_vram = 0 @@ -22,7 +22,12 @@ set_vram_to = NORMAL_VRAM try: import torch - total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) + import intel_extension_for_pytorch as ipex + if torch.xpu.is_available(): + xpu_available = True + total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) + else: + total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) forced_normal_vram = "--normalvram" in sys.argv if not forced_normal_vram and not forced_cpu: @@ -86,17 +91,10 @@ try: except: pass -try: - import intel_extension_for_pytorch as ipex - if torch.xpu.is_available(): - vram_state = XPU -except: - pass - if forced_cpu: vram_state = CPU -print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS", "XPU"][vram_state]) +print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", 
"NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state]) current_loaded_model = None @@ -133,6 +131,7 @@ def load_model_gpu(model): global current_loaded_model global vram_state global model_accelerated + global xpu_available if model is current_loaded_model: return @@ -149,19 +148,19 @@ def load_model_gpu(model): mps_device = torch.device("mps") real_model.to(mps_device) pass - elif vram_state == XPU: - real_model.to("xpu") - pass elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM: model_accelerated = False - real_model.cuda() + if xpu_available: + real_model.to("xpu") + else: + real_model.cuda() else: if vram_state == NO_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"}) elif vram_state == LOW_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"}) - accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda") + accelerate.dispatch_model(real_model, device_map=device_map, main_device="xpu" if xpu_available else "cuda") model_accelerated = True return current_loaded_model @@ -187,8 +186,12 @@ def load_controlnet_gpu(models): def load_if_low_vram(model): global vram_state + global xpu_available if vram_state == LOW_VRAM or vram_state == NO_VRAM: - return model.cuda() + if xpu_available: + return model.to("xpu") + else: + return model.cuda() return model def unload_if_low_vram(model): @@ -198,14 +201,16 @@ def unload_if_low_vram(model): return model def get_torch_device(): + global xpu_available if vram_state == MPS: return torch.device("mps") - if vram_state == XPU: - return torch.device("xpu") if vram_state == CPU: return torch.device("cpu") else: - return torch.cuda.current_device() + if xpu_available: + return torch.device("xpu") + else: + return torch.cuda.current_device() def get_autocast_device(dev): if hasattr(dev, 'type'): @@ -235,22 +240,24 @@ def pytorch_attention_enabled(): return ENABLE_PYTORCH_ATTENTION def get_free_memory(dev=None, torch_free_too=False): + global xpu_available if dev is None: dev = get_torch_device() if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'): mem_free_total = psutil.virtual_memory().available mem_free_torch = mem_free_total - elif hasattr(dev, 'type') and (dev.type == 'xpu'): - mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) - mem_free_torch = mem_free_total else: - stats = torch.cuda.memory_stats(dev) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(dev) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch + if xpu_available: + mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) + mem_free_torch = mem_free_total + else: + stats = torch.cuda.memory_stats(dev) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(dev) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch if torch_free_too: return (mem_free_total, mem_free_torch) @@ -274,12 +281,9 @@ def mps_mode(): global vram_state return vram_state == MPS -def xpu_mode(): - global vram_state - return vram_state == XPU - def should_use_fp16(): - if cpu_mode() or mps_mode() or xpu_mode(): + global xpu_available + if cpu_mode() or mps_mode() or xpu_available: 
return False #TODO ? if torch.cuda.is_bf16_supported(): From 60127a83040b3b243457980d04f3bb25c4491978 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Wed, 5 Apr 2023 23:57:31 -0700 Subject: [PATCH 42/62] diffusers loader --- comfy/diffusers_convert.py | 364 +++++++++++++++++++++ models/diffusers/put_diffusers_models_here | 0 nodes.py | 19 +- 3 files changed, 382 insertions(+), 1 deletion(-) create mode 100644 comfy/diffusers_convert.py create mode 100644 models/diffusers/put_diffusers_models_here diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py new file mode 100644 index 000000000..a31c1c11b --- /dev/null +++ b/comfy/diffusers_convert.py @@ -0,0 +1,364 @@ +import json +import os +import yaml + +# because of local import nonsense +import sys +sys.path.append(os.path.dirname(os.path.realpath(__file__))) + +import folder_paths +from comfy.ldm.util import instantiate_from_config +from comfy.sd import ModelPatcher, load_model_weights, CLIP, VAE +import os.path as osp +import re +import torch +from safetensors.torch import load_file, save_file + +# conversion code from https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_stable_diffusion.py + +# =================# +# UNet Conversion # +# =================# + +unet_conversion_map = [ + # (stable-diffusion, HF Diffusers) + ("time_embed.0.weight", "time_embedding.linear_1.weight"), + ("time_embed.0.bias", "time_embedding.linear_1.bias"), + ("time_embed.2.weight", "time_embedding.linear_2.weight"), + ("time_embed.2.bias", "time_embedding.linear_2.bias"), + ("input_blocks.0.0.weight", "conv_in.weight"), + ("input_blocks.0.0.bias", "conv_in.bias"), + ("out.0.weight", "conv_norm_out.weight"), + ("out.0.bias", "conv_norm_out.bias"), + ("out.2.weight", "conv_out.weight"), + ("out.2.bias", "conv_out.bias"), +] + +unet_conversion_map_resnet = [ + # (stable-diffusion, HF Diffusers) + ("in_layers.0", "norm1"), + ("in_layers.2", "conv1"), + ("out_layers.0", "norm2"), + ("out_layers.3", "conv2"), + ("emb_layers.1", "time_emb_proj"), + ("skip_connection", "conv_shortcut"), +] + +unet_conversion_map_layer = [] +# hardcoded number of downblocks and resnets/attentions... +# would need smarter logic for other networks. +for i in range(4): + # loop over downblocks/upblocks + + for j in range(2): + # loop over resnets/attentions for downblocks + hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}." + sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0." + unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) + + if i < 3: + # no attention layers in down_blocks.3 + hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}." + sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1." + unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) + + for j in range(3): + # loop over resnets/attentions for upblocks + hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}." + sd_up_res_prefix = f"output_blocks.{3 * i + j}.0." + unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) + + if i > 0: + # no attention layers in up_blocks.0 + hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}." + sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1." + unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) + + if i < 3: + # no downsample in down_blocks.3 + hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv." + sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op." 
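+        # each SD down level occupies three input_blocks (two resnets, then a
+        # downsampler), so the downsampler of level i sits at input_blocks[3 * (i + 1)]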
+ unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) + + # no upsample in up_blocks.3 + hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." + sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{1 if i == 0 else 2}." + unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) + +hf_mid_atn_prefix = "mid_block.attentions.0." +sd_mid_atn_prefix = "middle_block.1." +unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) + +for j in range(2): + hf_mid_res_prefix = f"mid_block.resnets.{j}." + sd_mid_res_prefix = f"middle_block.{2 * j}." + unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) + + +def convert_unet_state_dict(unet_state_dict): + # buyer beware: this is a *brittle* function, + # and correct output requires that all of these pieces interact in + # the exact order in which I have arranged them. + mapping = {k: k for k in unet_state_dict.keys()} + for sd_name, hf_name in unet_conversion_map: + mapping[hf_name] = sd_name + for k, v in mapping.items(): + if "resnets" in k: + for sd_part, hf_part in unet_conversion_map_resnet: + v = v.replace(hf_part, sd_part) + mapping[k] = v + for k, v in mapping.items(): + for sd_part, hf_part in unet_conversion_map_layer: + v = v.replace(hf_part, sd_part) + mapping[k] = v + new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} + return new_state_dict + + +# ================# +# VAE Conversion # +# ================# + +vae_conversion_map = [ + # (stable-diffusion, HF Diffusers) + ("nin_shortcut", "conv_shortcut"), + ("norm_out", "conv_norm_out"), + ("mid.attn_1.", "mid_block.attentions.0."), +] + +for i in range(4): + # down_blocks have two resnets + for j in range(2): + hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}." + sd_down_prefix = f"encoder.down.{i}.block.{j}." + vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) + + if i < 3: + hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0." + sd_downsample_prefix = f"down.{i}.downsample." + vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) + + hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." + sd_upsample_prefix = f"up.{3 - i}.upsample." + vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) + + # up_blocks have three resnets + # also, up blocks in hf are numbered in reverse from sd + for j in range(3): + hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}." + sd_up_prefix = f"decoder.up.{3 - i}.block.{j}." + vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) + +# this part accounts for mid blocks in both the encoder and the decoder +for i in range(2): + hf_mid_res_prefix = f"mid_block.resnets.{i}." + sd_mid_res_prefix = f"mid.block_{i + 1}." 
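+    # SD names its VAE mid-block resnets 1-based (mid.block_1, mid.block_2)
+    # while the HF names are 0-based, hence the i + 1 offset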
+ vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) + +vae_conversion_map_attn = [ + # (stable-diffusion, HF Diffusers) + ("norm.", "group_norm."), + ("q.", "query."), + ("k.", "key."), + ("v.", "value."), + ("proj_out.", "proj_attn."), +] + + +def reshape_weight_for_sd(w): + # convert HF linear weights to SD conv2d weights + return w.reshape(*w.shape, 1, 1) + + +def convert_vae_state_dict(vae_state_dict): + mapping = {k: k for k in vae_state_dict.keys()} + for k, v in mapping.items(): + for sd_part, hf_part in vae_conversion_map: + v = v.replace(hf_part, sd_part) + mapping[k] = v + for k, v in mapping.items(): + if "attentions" in k: + for sd_part, hf_part in vae_conversion_map_attn: + v = v.replace(hf_part, sd_part) + mapping[k] = v + new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} + weights_to_convert = ["q", "k", "v", "proj_out"] + for k, v in new_state_dict.items(): + for weight_name in weights_to_convert: + if f"mid.attn_1.{weight_name}.weight" in k: + print(f"Reshaping {k} for SD format") + new_state_dict[k] = reshape_weight_for_sd(v) + return new_state_dict + + +# =========================# +# Text Encoder Conversion # +# =========================# + + +textenc_conversion_lst = [ + # (stable-diffusion, HF Diffusers) + ("resblocks.", "text_model.encoder.layers."), + ("ln_1", "layer_norm1"), + ("ln_2", "layer_norm2"), + (".c_fc.", ".fc1."), + (".c_proj.", ".fc2."), + (".attn", ".self_attn"), + ("ln_final.", "transformer.text_model.final_layer_norm."), + ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), + ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), +] +protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} +textenc_pattern = re.compile("|".join(protected.keys())) + +# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp +code2idx = {"q": 0, "k": 1, "v": 2} + + +def convert_text_enc_state_dict_v20(text_enc_dict): + new_state_dict = {} + capture_qkv_weight = {} + capture_qkv_bias = {} + for k, v in text_enc_dict.items(): + if ( + k.endswith(".self_attn.q_proj.weight") + or k.endswith(".self_attn.k_proj.weight") + or k.endswith(".self_attn.v_proj.weight") + ): + k_pre = k[: -len(".q_proj.weight")] + k_code = k[-len("q_proj.weight")] + if k_pre not in capture_qkv_weight: + capture_qkv_weight[k_pre] = [None, None, None] + capture_qkv_weight[k_pre][code2idx[k_code]] = v + continue + + if ( + k.endswith(".self_attn.q_proj.bias") + or k.endswith(".self_attn.k_proj.bias") + or k.endswith(".self_attn.v_proj.bias") + ): + k_pre = k[: -len(".q_proj.bias")] + k_code = k[-len("q_proj.bias")] + if k_pre not in capture_qkv_bias: + capture_qkv_bias[k_pre] = [None, None, None] + capture_qkv_bias[k_pre][code2idx[k_code]] = v + continue + + relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k) + new_state_dict[relabelled_key] = v + + for k_pre, tensors in capture_qkv_weight.items(): + if None in tensors: + raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") + relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre) + new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors) + + for k_pre, tensors in capture_qkv_bias.items(): + if None in tensors: + raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") + relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], 
k_pre) + new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors) + + return new_state_dict + + +def convert_text_enc_state_dict(text_enc_dict): + return text_enc_dict + + +def load_diffusers(model_path, fp16=True, output_vae=True, output_clip=True, embedding_directory=None): + diffusers_unet_conf = json.load(open(osp.join(model_path, "unet/config.json"))) + diffusers_scheduler_conf = json.load(open(osp.join(model_path, "scheduler/scheduler_config.json"))) + + # magic + v2 = diffusers_unet_conf["sample_size"] == 96 + v_pred = diffusers_scheduler_conf['prediction_type'] == 'v_prediction' + + if v2: + if v_pred: + config_path = folder_paths.get_full_path("configs", 'v2-inference-v.yaml') + else: + config_path = folder_paths.get_full_path("configs", 'v2-inference.yaml') + else: + config_path = folder_paths.get_full_path("configs", 'v1-inference.yaml') + + with open(config_path, 'r') as stream: + config = yaml.safe_load(stream) + + model_config_params = config['model']['params'] + clip_config = model_config_params['cond_stage_config'] + scale_factor = model_config_params['scale_factor'] + vae_config = model_config_params['first_stage_config'] + vae_config['scale_factor'] = scale_factor + + unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors") + vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors") + text_enc_path = osp.join(model_path, "text_encoder", "model.safetensors") + + # Load models from safetensors if it exists, if it doesn't pytorch + if osp.exists(unet_path): + unet_state_dict = load_file(unet_path, device="cpu") + else: + unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin") + unet_state_dict = torch.load(unet_path, map_location="cpu") + + if osp.exists(vae_path): + vae_state_dict = load_file(vae_path, device="cpu") + else: + vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin") + vae_state_dict = torch.load(vae_path, map_location="cpu") + + if osp.exists(text_enc_path): + text_enc_dict = load_file(text_enc_path, device="cpu") + else: + text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin") + text_enc_dict = torch.load(text_enc_path, map_location="cpu") + + # Convert the UNet model + unet_state_dict = convert_unet_state_dict(unet_state_dict) + unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} + + # Convert the VAE model + vae_state_dict = convert_vae_state_dict(vae_state_dict) + vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} + + # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper + is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict + + if is_v20_model: + # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm + text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()} + text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict) + text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} + else: + text_enc_dict = convert_text_enc_state_dict(text_enc_dict) + text_enc_dict = {"cond_stage_model.transformer." 
+ k: v for k, v in text_enc_dict.items()} + + # Put together new checkpoint + sd = {**unet_state_dict, **vae_state_dict, **text_enc_dict} + + clip = None + vae = None + + class WeightsLoader(torch.nn.Module): + pass + + w = WeightsLoader() + load_state_dict_to = [] + if output_vae: + vae = VAE(scale_factor=scale_factor, config=vae_config) + w.first_stage_model = vae.first_stage_model + load_state_dict_to = [w] + + if output_clip: + clip = CLIP(config=clip_config, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model + load_state_dict_to = [w] + + model = instantiate_from_config(config["model"]) + model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) + + if fp16: + model = model.half() + + return ModelPatcher(model), clip, vae diff --git a/models/diffusers/put_diffusers_models_here b/models/diffusers/put_diffusers_models_here new file mode 100644 index 000000000..e69de29bb diff --git a/nodes.py b/nodes.py index 187d54a11..776bc3819 100644 --- a/nodes.py +++ b/nodes.py @@ -4,13 +4,14 @@ import os import sys import json import hashlib -import copy import traceback from PIL import Image from PIL.PngImagePlugin import PngInfo import numpy as np +from comfy.diffusers_convert import load_diffusers + sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) @@ -219,6 +220,21 @@ class CheckpointLoaderSimple: out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) return out +class DiffusersLoader: + @classmethod + def INPUT_TYPES(cls): + return {"required": {"model_path": (os.listdir(os.path.join(folder_paths.models_dir, 'diffusers'), ),), + }} + RETURN_TYPES = ("MODEL", "CLIP", "VAE") + FUNCTION = "load_checkpoint" + + CATEGORY = "loaders" + + def load_checkpoint(self, model_path, output_vae=True, output_clip=True): + model_path = os.path.join(folder_paths.models_dir, 'diffusers', model_path) + return load_diffusers(model_path, fp16=True, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + + class unCLIPCheckpointLoader: @classmethod def INPUT_TYPES(s): @@ -1076,6 +1092,7 @@ NODE_CLASS_MAPPINGS = { "TomePatchModel": TomePatchModel, "unCLIPCheckpointLoader": unCLIPCheckpointLoader, "CheckpointLoader": CheckpointLoader, + "DiffusersLoader": DiffusersLoader, } def load_custom_node(module_path): From c418d988ba59b3114770a0fa111d301f04880fca Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Wed, 5 Apr 2023 23:59:03 -0700 Subject: [PATCH 43/62] update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0f7d24c45..90931141d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin - Many optimizations: Only re-executes the parts of the workflow that changes between executions. - Command line option: ```--lowvram``` to make it work on GPUs with less than 3GB vram (enabled automatically on GPUs with low vram) - Works even if you don't have a GPU with: ```--cpu``` (slow) -- Can load both ckpt and safetensors models/checkpoints. Standalone VAEs and CLIP models. +- Can load ckpt, safetensors and diffusers models/checkpoints. Standalone VAEs and CLIP models. 
- Embeddings/Textual inversion - [Loras (regular, locon and loha)](https://comfyanonymous.github.io/ComfyUI_examples/lora/) - Loading full workflows (with seeds) from generated PNG files. From 3d16077e3806b0817b1d43dc14f61e5dee5495c8 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 00:24:52 -0700 Subject: [PATCH 44/62] empty list if diffusers directory doesn't exist --- nodes.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index 776bc3819..1af62887d 100644 --- a/nodes.py +++ b/nodes.py @@ -223,8 +223,11 @@ class CheckpointLoaderSimple: class DiffusersLoader: @classmethod def INPUT_TYPES(cls): - return {"required": {"model_path": (os.listdir(os.path.join(folder_paths.models_dir, 'diffusers'), ),), - }} + paths = [] + search_path = os.path.join(folder_paths.models_dir, 'diffusers') + if os.path.exists(search_path): + paths = next(os.walk(search_path))[1] + return {"required": {"model_path": (paths,), }} RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" From 42fd67b5cb0de9bd1228af7a93dec08b2f1486c3 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 00:28:06 -0700 Subject: [PATCH 45/62] use precision determined by model management --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 1af62887d..8271da04c 100644 --- a/nodes.py +++ b/nodes.py @@ -235,7 +235,7 @@ class DiffusersLoader: def load_checkpoint(self, model_path, output_vae=True, output_clip=True): model_path = os.path.join(folder_paths.models_dir, 'diffusers', model_path) - return load_diffusers(model_path, fp16=True, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) class unCLIPCheckpointLoader: From 3e2608e12b312fd5d2396d4146d992cd4f8b9ab4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Thu, 6 Apr 2023 15:44:05 +0800 Subject: [PATCH 46/62] Fix auto lowvram detection on CUDA --- comfy/model_management.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index a84167746..b0123b5fc 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -22,11 +22,12 @@ set_vram_to = NORMAL_VRAM try: import torch - import intel_extension_for_pytorch as ipex - if torch.xpu.is_available(): - xpu_available = True - total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) - else: + try: + import intel_extension_for_pytorch as ipex + if torch.xpu.is_available(): + xpu_available = True + total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) + except: total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) forced_normal_vram = "--normalvram" in sys.argv From 01c1fc669fb8cd41f627dad871257acbaaf24b47 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Thu, 6 Apr 2023 13:19:00 -0400 Subject: [PATCH 47/62] set listen flag to listen on all if specifed --- comfy/cli_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 6a56e315c..a27dc7a7f 100644 --- a/comfy/cli_args.py +++ 
b/comfy/cli_args.py @@ -2,7 +2,7 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 127.0.0.1 if none given so the UI can be accessed from other computers.") +parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") From 7d62d89f9325348179fc9b0db146ff50fa7c808c Mon Sep 17 00:00:00 2001 From: EllangoK Date: Wed, 5 Apr 2023 13:08:08 -0400 Subject: [PATCH 48/62] add cors middleware --- server.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/server.py b/server.py index 840d9a4e7..005bf9b2c 100644 --- a/server.py +++ b/server.py @@ -27,6 +27,19 @@ async def cache_control(request: web.Request, handler): response.headers.setdefault('Cache-Control', 'no-cache') return response +@web.middleware +async def cors_middleware(request: web.Request, handler): + if request.method == "OPTIONS": + # Pre-flight request. Reply successfully: + response = web.Response() + else: + response = await handler(request) + response.headers['Access-Control-Allow-Origin'] = '*' + response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS' + response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization' + response.headers['Access-Control-Allow-Credentials'] = 'true' + return response + class PromptServer(): def __init__(self, loop): PromptServer.instance = self @@ -37,7 +50,7 @@ class PromptServer(): self.loop = loop self.messages = asyncio.Queue() self.number = 0 - self.app = web.Application(client_max_size=20971520, middlewares=[cache_control]) + self.app = web.Application(client_max_size=20971520, middlewares=[cache_control, cors_middleware]) self.sockets = dict() self.web_root = os.path.join(os.path.dirname( os.path.realpath(__file__)), "web") From 48efae16084b423166f9a1930b989489169d22cf Mon Sep 17 00:00:00 2001 From: EllangoK Date: Thu, 6 Apr 2023 15:06:22 -0400 Subject: [PATCH 49/62] makes cors a cli parameter --- comfy/cli_args.py | 3 ++- server.py | 36 +++++++++++++++++++++++------------- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index a27dc7a7f..5133e0ae5 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -4,8 +4,10 @@ parser = argparse.ArgumentParser() parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. 
(listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") +parser.add_argument("--cors", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") +parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") attn_group = parser.add_mutually_exclusive_group() @@ -13,7 +15,6 @@ attn_group.add_argument("--use-split-cross-attention", action="store_true", help attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") -parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") vram_group = parser.add_mutually_exclusive_group() vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.") diff --git a/server.py b/server.py index 005bf9b2c..a9c0b4599 100644 --- a/server.py +++ b/server.py @@ -18,6 +18,7 @@ except ImportError: sys.exit() import mimetypes +from comfy.cli_args import args @web.middleware @@ -27,18 +28,22 @@ async def cache_control(request: web.Request, handler): response.headers.setdefault('Cache-Control', 'no-cache') return response -@web.middleware -async def cors_middleware(request: web.Request, handler): - if request.method == "OPTIONS": - # Pre-flight request. Reply successfully: - response = web.Response() - else: - response = await handler(request) - response.headers['Access-Control-Allow-Origin'] = '*' - response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS' - response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization' - response.headers['Access-Control-Allow-Credentials'] = 'true' - return response +def create_cors_middleware(allowed_origin: str): + @web.middleware + async def cors_middleware(request: web.Request, handler): + if request.method == "OPTIONS": + # Pre-flight request. 
Reply successfully: + response = web.Response() + else: + response = await handler(request) + + response.headers['Access-Control-Allow-Origin'] = allowed_origin + response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS' + response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization' + response.headers['Access-Control-Allow-Credentials'] = 'true' + return response + + return cors_middleware class PromptServer(): def __init__(self, loop): @@ -50,7 +55,12 @@ class PromptServer(): self.loop = loop self.messages = asyncio.Queue() self.number = 0 - self.app = web.Application(client_max_size=20971520, middlewares=[cache_control, cors_middleware]) + + middlewares = [cache_control] + if args.cors: + middlewares.append(create_cors_middleware(args.cors)) + + self.app = web.Application(client_max_size=20971520, middlewares=middlewares) self.sockets = dict() self.web_root = os.path.join(os.path.dirname( os.path.realpath(__file__)), "web") From f84f2508cc45a014cc27e023e9623db0450d237e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 6 Apr 2023 15:24:55 -0400 Subject: [PATCH 50/62] Rename the cors parameter to something more verbose. --- comfy/cli_args.py | 2 +- server.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 5133e0ae5..f2960ae31 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -4,7 +4,7 @@ parser = argparse.ArgumentParser() parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") -parser.add_argument("--cors", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") +parser.add_argument("--enable-cors-header", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") diff --git a/server.py b/server.py index a9c0b4599..95cdeb051 100644 --- a/server.py +++ b/server.py @@ -57,8 +57,8 @@ class PromptServer(): self.number = 0 middlewares = [cache_control] - if args.cors: - middlewares.append(create_cors_middleware(args.cors)) + if args.enable_cors_header: + middlewares.append(create_cors_middleware(args.enable_cors_header)) self.app = web.Application(client_max_size=20971520, middlewares=middlewares) self.sockets = dict() From 28fff5d1dbba8b4a546e31c69240133f35b2235f Mon Sep 17 00:00:00 2001 From: EllangoK Date: Thu, 6 Apr 2023 19:06:39 -0400 Subject: [PATCH 51/62] fixes lack of support for multi configs also adds some metavars to argarse --- comfy/cli_args.py | 8 ++++---- main.py | 5 ++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index f2960ae31..b6898cea9 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -2,12 +2,12 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument("--listen", nargs="?", const="0.0.0.0", 
default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") +parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") -parser.add_argument("--enable-cors-header", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") -parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") +parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") +parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") -parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") +parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") attn_group = parser.add_mutually_exclusive_group() diff --git a/main.py b/main.py index 51a48fc6d..9c0a3d8a1 100644 --- a/main.py +++ b/main.py @@ -1,7 +1,9 @@ import asyncio +import itertools import os import shutil import threading + from comfy.cli_args import args if os.name == "nt": @@ -94,7 +96,8 @@ if __name__ == "__main__": load_extra_path_config(extra_model_paths_config_path) if args.extra_model_paths_config: - load_extra_path_config(args.extra_model_paths_config) + for config_path in itertools.chain(*args.extra_model_paths_config): + load_extra_path_config(config_path) if args.output_directory: output_dir = os.path.abspath(args.output_directory) From 60b4c31ab3c2ec16575c26d9d08ecabc8643b381 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 6 Apr 2023 22:22:59 -0400 Subject: [PATCH 52/62] Add webp images to upload accept list. --- web/scripts/widgets.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 5f5043cd0..d1a9c6c6e 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -306,7 +306,7 @@ export const ComfyWidgets = { const fileInput = document.createElement("input"); Object.assign(fileInput, { type: "file", - accept: "image/jpeg,image/png", + accept: "image/jpeg,image/png,image/webp", style: "display: none", onchange: async () => { if (fileInput.files.length) { From bceccca0e59862c3410b5d99b47fe1e01ba914af Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 6 Apr 2023 23:52:34 -0400 Subject: [PATCH 53/62] Small refactor. 
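Editor's note, not part of the commit: the hunks below collapse the repeated
"if xpu_available: real_model.to('xpu') else: real_model.cuda()" branches
into single get_torch_device() calls. The resolver itself is not shown in
this patch; a plausible shape for it -- purely a sketch, assuming it falls
back from XPU to CUDA to CPU -- would be:

    import torch

    xpu_available = False  # assumed to be set by the ipex import probe

    def get_torch_device():
        # Prefer Intel XPU when intel_extension_for_pytorch enabled it.
        if xpu_available:
            return torch.device("xpu")
        if torch.cuda.is_available():
            return torch.device("cuda", torch.cuda.current_device())
        return torch.device("cpu")

Routing every .to() call and the accelerate.dispatch_model() main_device
through one resolver keeps the per-backend quirks in a single place.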
--- comfy/model_management.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 92c59efe7..504da2190 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -129,7 +129,6 @@ def load_model_gpu(model): global current_loaded_model global vram_state global model_accelerated - global xpu_available if model is current_loaded_model: return @@ -148,17 +147,14 @@ def load_model_gpu(model): pass elif vram_state == VRAMState.NORMAL_VRAM or vram_state == VRAMState.HIGH_VRAM: model_accelerated = False - if xpu_available: - real_model.to("xpu") - else: - real_model.cuda() + real_model.to(get_torch_device()) else: if vram_state == VRAMState.NO_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"}) elif vram_state == VRAMState.LOW_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"}) - accelerate.dispatch_model(real_model, device_map=device_map, main_device="xpu" if xpu_available else "cuda") + accelerate.dispatch_model(real_model, device_map=device_map, main_device=get_torch_device()) model_accelerated = True return current_loaded_model @@ -184,12 +180,8 @@ def load_controlnet_gpu(models): def load_if_low_vram(model): global vram_state - global xpu_available if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: - if xpu_available: - return model.to("xpu") - else: - return model.cuda() + return model.to(get_torch_device()) return model def unload_if_low_vram(model): From 64557d67810c81f72bd6a7544bd8930488868319 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 00:27:54 -0400 Subject: [PATCH 54/62] Add a --force-fp32 argument to force fp32 for debugging. --- comfy/cli_args.py | 1 + comfy/model_management.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index b6898cea9..739891f71 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -9,6 +9,7 @@ parser.add_argument("--extra-model-paths-config", type=str, default=None, metava parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") +parser.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).") attn_group = parser.add_mutually_exclusive_group() attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. 
Ignored when xformers is used.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 504da2190..2407140fd 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -69,6 +69,11 @@ elif args.novram: elif args.highvram: vram_state = VRAMState.HIGH_VRAM +FORCE_FP32 = False +if args.force_fp32: + print("Forcing FP32, if this improves things please report it.") + FORCE_FP32 = True + if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM): try: @@ -273,6 +278,9 @@ def mps_mode(): def should_use_fp16(): global xpu_available + if FORCE_FP32: + return False + if cpu_mode() or mps_mode() or xpu_available: return False #TODO ? From 72a8973bd56b7cc179eb603ccd61385fdca5766d Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 21:45:08 -0700 Subject: [PATCH 55/62] allow configurable path for diffusers models --- folder_paths.py | 1 + nodes.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/folder_paths.py b/folder_paths.py index f13e4895f..ab3359347 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -23,6 +23,7 @@ folder_names_and_paths["clip"] = ([os.path.join(models_dir, "clip")], supported_ folder_names_and_paths["clip_vision"] = ([os.path.join(models_dir, "clip_vision")], supported_pt_extensions) folder_names_and_paths["style_models"] = ([os.path.join(models_dir, "style_models")], supported_pt_extensions) folder_names_and_paths["embeddings"] = ([os.path.join(models_dir, "embeddings")], supported_pt_extensions) +folder_names_and_paths["diffusers"] = ([os.path.join(models_dir, "diffusers")], ["folder"]) folder_names_and_paths["controlnet"] = ([os.path.join(models_dir, "controlnet"), os.path.join(models_dir, "t2i_adapter")], supported_pt_extensions) folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions) diff --git a/nodes.py b/nodes.py index 8271da04c..934b458f2 100644 --- a/nodes.py +++ b/nodes.py @@ -224,7 +224,7 @@ class DiffusersLoader: @classmethod def INPUT_TYPES(cls): paths = [] - search_path = os.path.join(folder_paths.models_dir, 'diffusers') + search_path = folder_paths.get_folder_paths("diffusers")[0] if os.path.exists(search_path): paths = next(os.walk(search_path))[1] return {"required": {"model_path": (paths,), }} From f51b7a92c72b5fe7a12d642a545e59f1f6150fb4 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 21:48:58 -0700 Subject: [PATCH 56/62] search all diffusers paths (oops) --- nodes.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nodes.py b/nodes.py index 934b458f2..a4366f834 100644 --- a/nodes.py +++ b/nodes.py @@ -224,9 +224,10 @@ class DiffusersLoader: @classmethod def INPUT_TYPES(cls): paths = [] - search_path = folder_paths.get_folder_paths("diffusers")[0] - if os.path.exists(search_path): - paths = next(os.walk(search_path))[1] + search_paths = folder_paths.get_folder_paths("diffusers") + for search_path in search_paths: + if os.path.exists(search_path): + paths = next(os.walk(search_path))[1] return {"required": {"model_path": (paths,), }} RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" From 7734d65f22a8f30f73cb72e81586b2d015229060 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 22:02:26 -0700 Subject: [PATCH 57/62] fix loading alt folders --- nodes.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nodes.py b/nodes.py index a4366f834..274ae2f1f 100644 --- a/nodes.py +++ b/nodes.py @@ -224,10 +224,9 @@ class DiffusersLoader: 
@classmethod def INPUT_TYPES(cls): paths = [] - search_paths = folder_paths.get_folder_paths("diffusers") - for search_path in search_paths: + for search_path in folder_paths.get_folder_paths("diffusers"): if os.path.exists(search_path): - paths = next(os.walk(search_path))[1] + paths += next(os.walk(search_path))[1] return {"required": {"model_path": (paths,), }} RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" @@ -235,7 +234,13 @@ class DiffusersLoader: CATEGORY = "loaders" def load_checkpoint(self, model_path, output_vae=True, output_clip=True): - model_path = os.path.join(folder_paths.models_dir, 'diffusers', model_path) + for search_path in folder_paths.get_folder_paths("diffusers"): + if os.path.exists(search_path): + paths = next(os.walk(search_path))[1] + if model_path in paths: + model_path = os.path.join(search_path, model_path) + break + search_paths = folder_paths.get_folder_paths("diffusers") return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) From 58ed0f2da438aaf253f9880578d694ad917819f8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 01:28:15 -0400 Subject: [PATCH 58/62] Fix loading SD1.5 diffusers checkpoint. --- comfy/diffusers_convert.py | 4 +++- nodes.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py index a31c1c11b..950137f2c 100644 --- a/comfy/diffusers_convert.py +++ b/comfy/diffusers_convert.py @@ -272,7 +272,8 @@ def load_diffusers(model_path, fp16=True, output_vae=True, output_clip=True, emb # magic v2 = diffusers_unet_conf["sample_size"] == 96 - v_pred = diffusers_scheduler_conf['prediction_type'] == 'v_prediction' + if 'prediction_type' in diffusers_scheduler_conf: + v_pred = diffusers_scheduler_conf['prediction_type'] == 'v_prediction' if v2: if v_pred: @@ -290,6 +291,7 @@ def load_diffusers(model_path, fp16=True, output_vae=True, output_clip=True, emb scale_factor = model_config_params['scale_factor'] vae_config = model_config_params['first_stage_config'] vae_config['scale_factor'] = scale_factor + model_config_params["unet_config"]["params"]["use_fp16"] = fp16 unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors") vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors") diff --git a/nodes.py b/nodes.py index 274ae2f1f..025e4fcb4 100644 --- a/nodes.py +++ b/nodes.py @@ -231,7 +231,7 @@ class DiffusersLoader: RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" - CATEGORY = "loaders" + CATEGORY = "advanced/loaders" def load_checkpoint(self, model_path, output_vae=True, output_clip=True): for search_path in folder_paths.get_folder_paths("diffusers"): @@ -240,7 +240,7 @@ class DiffusersLoader: if model_path in paths: model_path = os.path.join(search_path, model_path) break - search_paths = folder_paths.get_folder_paths("diffusers") + return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) From 44fea050649347ca4b4e7317a83d11c3b4b87f87 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 02:29:56 -0400 Subject: [PATCH 59/62] Cleanup. 
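Editor's note, not part of the commit: the sys.path.append() shim deleted
below existed so diffusers_convert.py could use top-level imports while
living inside the comfy package. The hazard of such shims is that one file
becomes importable under two names, and Python then loads it twice with
separate globals. A stdlib illustration of the identity guarantee the shim
can break:

    import sys
    import json as first
    import json as second

    # Same canonical module name -> same cached object in sys.modules.
    assert first is second
    assert sys.modules["json"] is first
    # With a path hack, "diffusers_convert" and "comfy.diffusers_convert"
    # would be two distinct module objects, each holding its own copy of
    # the conversion maps defined earlier in this file.

Importing the module once, under its package-qualified name from nodes.py,
sidesteps that class of bug.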
--- comfy/diffusers_convert.py | 4 ---- nodes.py | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py index 950137f2c..ceca80305 100644 --- a/comfy/diffusers_convert.py +++ b/comfy/diffusers_convert.py @@ -2,10 +2,6 @@ import json import os import yaml -# because of local import nonsense -import sys -sys.path.append(os.path.dirname(os.path.realpath(__file__))) - import folder_paths from comfy.ldm.util import instantiate_from_config from comfy.sd import ModelPatcher, load_model_weights, CLIP, VAE diff --git a/nodes.py b/nodes.py index 025e4fcb4..5c3b3a4ee 100644 --- a/nodes.py +++ b/nodes.py @@ -10,11 +10,11 @@ from PIL import Image from PIL.PngImagePlugin import PngInfo import numpy as np -from comfy.diffusers_convert import load_diffusers sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) +import comfy.diffusers_convert import comfy.samplers import comfy.sd import comfy.utils @@ -241,7 +241,7 @@ class DiffusersLoader: model_path = os.path.join(search_path, model_path) break - return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return comfy.diffusers_convert.load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) class unCLIPCheckpointLoader: From 07e9a6b8266326c5f51ea7b0cc20c6129bf5d238 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 15:11:00 -0400 Subject: [PATCH 60/62] Update litegraph from upstream. --- web/lib/litegraph.core.js | 321 +++++++++++++++++++------------------- 1 file changed, 164 insertions(+), 157 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 862d59067..066f51938 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -89,6 +89,7 @@ NO_TITLE: 1, TRANSPARENT_TITLE: 2, AUTOHIDE_TITLE: 3, + VERTICAL_LAYOUT: "vertical", // arrange nodes vertically proxy: null, //used to redirect calls node_images_path: "", @@ -125,14 +126,14 @@ registered_slot_out_types: {}, // slot types for nodeclass slot_types_in: [], // slot types IN slot_types_out: [], // slot types OUT - slot_types_default_in: [], // specify for each IN slot type a(/many) deafult node(s), use single string, array, or object (with node, title, parameters, ..) like for search - slot_types_default_out: [], // specify for each OUT slot type a(/many) deafult node(s), use single string, array, or object (with node, title, parameters, ..) like for search + slot_types_default_in: [], // specify for each IN slot type a(/many) default node(s), use single string, array, or object (with node, title, parameters, ..) like for search + slot_types_default_out: [], // specify for each OUT slot type a(/many) default node(s), use single string, array, or object (with node, title, parameters, ..) like for search alt_drag_do_clone_nodes: false, // [true!] very handy, ALT click to clone and drag the new node do_add_triggers_slots: false, // [true!] will create and connect event slots when using action/events connections, !WILL CHANGE node mode when using onTrigger (enable mode colors), onExecuted does not need this - allow_multi_output_for_events: true, // [false!] being events, it is strongly reccomended to use them sequentually, one by one + allow_multi_output_for_events: true, // [false!] 
being events, it is strongly reccomended to use them sequentially, one by one middle_click_slot_add_default_node: false, //[true!] allows to create and connect a ndoe clicking with the third button (wheel) @@ -158,80 +159,67 @@ console.log("Node registered: " + type); } - var categories = type.split("/"); - var classname = base_class.name; + const classname = base_class.name; - var pos = type.lastIndexOf("/"); - base_class.category = type.substr(0, pos); + const pos = type.lastIndexOf("/"); + base_class.category = type.substring(0, pos); if (!base_class.title) { base_class.title = classname; } - //info.name = name.substr(pos+1,name.length - pos); //extend class - if (base_class.prototype) { - //is a class - for (var i in LGraphNode.prototype) { - if (!base_class.prototype[i]) { - base_class.prototype[i] = LGraphNode.prototype[i]; - } + for (var i in LGraphNode.prototype) { + if (!base_class.prototype[i]) { + base_class.prototype[i] = LGraphNode.prototype[i]; } } - var prev = this.registered_node_types[type]; - if(prev) - console.log("replacing node type: " + type); - else - { - if( !Object.hasOwnProperty( base_class.prototype, "shape") ) - Object.defineProperty(base_class.prototype, "shape", { - set: function(v) { - switch (v) { - case "default": - delete this._shape; - break; - case "box": - this._shape = LiteGraph.BOX_SHAPE; - break; - case "round": - this._shape = LiteGraph.ROUND_SHAPE; - break; - case "circle": - this._shape = LiteGraph.CIRCLE_SHAPE; - break; - case "card": - this._shape = LiteGraph.CARD_SHAPE; - break; - default: - this._shape = v; - } - }, - get: function(v) { - return this._shape; - }, - enumerable: true, - configurable: true - }); + const prev = this.registered_node_types[type]; + if(prev) { + console.log("replacing node type: " + type); + } + if( !Object.prototype.hasOwnProperty.call( base_class.prototype, "shape") ) { + Object.defineProperty(base_class.prototype, "shape", { + set: function(v) { + switch (v) { + case "default": + delete this._shape; + break; + case "box": + this._shape = LiteGraph.BOX_SHAPE; + break; + case "round": + this._shape = LiteGraph.ROUND_SHAPE; + break; + case "circle": + this._shape = LiteGraph.CIRCLE_SHAPE; + break; + case "card": + this._shape = LiteGraph.CARD_SHAPE; + break; + default: + this._shape = v; + } + }, + get: function() { + return this._shape; + }, + enumerable: true, + configurable: true + }); + - //warnings - if (base_class.prototype.onPropertyChange) { - console.warn( - "LiteGraph node class " + - type + - " has onPropertyChange method, it must be called onPropertyChanged with d at the end" - ); - } - - //used to know which nodes create when dragging files to the canvas - if (base_class.supported_extensions) { - for (var i in base_class.supported_extensions) { - var ext = base_class.supported_extensions[i]; - if(ext && ext.constructor === String) - this.node_types_by_file_extension[ ext.toLowerCase() ] = base_class; - } - } - } + //used to know which nodes to create when dragging files to the canvas + if (base_class.supported_extensions) { + for (let i in base_class.supported_extensions) { + const ext = base_class.supported_extensions[i]; + if(ext && ext.constructor === String) { + this.node_types_by_file_extension[ ext.toLowerCase() ] = base_class; + } + } + } + } this.registered_node_types[type] = base_class; if (base_class.constructor.name) { @@ -252,19 +240,11 @@ " has onPropertyChange method, it must be called onPropertyChanged with d at the end" ); } - - //used to know which nodes create when dragging files to the 
canvas - if (base_class.supported_extensions) { - for (var i=0; i < base_class.supported_extensions.length; i++) { - var ext = base_class.supported_extensions[i]; - if(ext && ext.constructor === String) - this.node_types_by_file_extension[ ext.toLowerCase() ] = base_class; - } - } - // TODO one would want to know input and ouput :: this would allow trought registerNodeAndSlotType to get all the slots types - //console.debug("Registering "+type); - if (this.auto_load_slot_types) nodeTmp = new base_class(base_class.title || "tmpnode"); + // TODO one would want to know input and ouput :: this would allow through registerNodeAndSlotType to get all the slots types + if (this.auto_load_slot_types) { + new base_class(base_class.title || "tmpnode"); + } }, /** @@ -1260,37 +1240,39 @@ * Positions every node in a more readable manner * @method arrange */ - LGraph.prototype.arrange = function(margin) { + LGraph.prototype.arrange = function (margin, layout) { margin = margin || 100; - var nodes = this.computeExecutionOrder(false, true); - var columns = []; - for (var i = 0; i < nodes.length; ++i) { - var node = nodes[i]; - var col = node._level || 1; + const nodes = this.computeExecutionOrder(false, true); + const columns = []; + for (let i = 0; i < nodes.length; ++i) { + const node = nodes[i]; + const col = node._level || 1; if (!columns[col]) { columns[col] = []; } columns[col].push(node); } - var x = margin; + let x = margin; - for (var i = 0; i < columns.length; ++i) { - var column = columns[i]; + for (let i = 0; i < columns.length; ++i) { + const column = columns[i]; if (!column) { continue; } - var max_size = 100; - var y = margin + LiteGraph.NODE_TITLE_HEIGHT; - for (var j = 0; j < column.length; ++j) { - var node = column[j]; - node.pos[0] = x; - node.pos[1] = y; - if (node.size[0] > max_size) { - max_size = node.size[0]; + let max_size = 100; + let y = margin + LiteGraph.NODE_TITLE_HEIGHT; + for (let j = 0; j < column.length; ++j) { + const node = column[j]; + node.pos[0] = (layout == LiteGraph.VERTICAL_LAYOUT) ? y : x; + node.pos[1] = (layout == LiteGraph.VERTICAL_LAYOUT) ? x : y; + const max_size_index = (layout == LiteGraph.VERTICAL_LAYOUT) ? 1 : 0; + if (node.size[max_size_index] > max_size) { + max_size = node.size[max_size_index]; } - y += node.size[1] + margin + LiteGraph.NODE_TITLE_HEIGHT; + const node_size_index = (layout == LiteGraph.VERTICAL_LAYOUT) ? 0 : 1; + y += node.size[node_size_index] + margin + LiteGraph.NODE_TITLE_HEIGHT; } x += max_size + margin; } @@ -2468,43 +2450,34 @@ this.title = this.constructor.title; } - if (this.onConnectionsChange) { - if (this.inputs) { - for (var i = 0; i < this.inputs.length; ++i) { - var input = this.inputs[i]; - var link_info = this.graph - ? this.graph.links[input.link] - : null; - this.onConnectionsChange( - LiteGraph.INPUT, - i, - true, - link_info, - input - ); //link_info has been created now, so its updated - } - } + if (this.inputs) { + for (var i = 0; i < this.inputs.length; ++i) { + var input = this.inputs[i]; + var link_info = this.graph ? this.graph.links[input.link] : null; + if (this.onConnectionsChange) + this.onConnectionsChange( LiteGraph.INPUT, i, true, link_info, input ); //link_info has been created now, so its updated - if (this.outputs) { - for (var i = 0; i < this.outputs.length; ++i) { - var output = this.outputs[i]; - if (!output.links) { - continue; - } - for (var j = 0; j < output.links.length; ++j) { - var link_info = this.graph - ? 
this.graph.links[output.links[j]] - : null; - this.onConnectionsChange( - LiteGraph.OUTPUT, - i, - true, - link_info, - output - ); //link_info has been created now, so its updated - } - } - } + if( this.onInputAdded ) + this.onInputAdded(input); + + } + } + + if (this.outputs) { + for (var i = 0; i < this.outputs.length; ++i) { + var output = this.outputs[i]; + if (!output.links) { + continue; + } + for (var j = 0; j < output.links.length; ++j) { + var link_info = this.graph ? this.graph.links[output.links[j]] : null; + if (this.onConnectionsChange) + this.onConnectionsChange( LiteGraph.OUTPUT, i, true, link_info, output ); //link_info has been created now, so its updated + } + + if( this.onOutputAdded ) + this.onOutputAdded(output); + } } if( this.widgets ) @@ -3200,6 +3173,15 @@ return; } + if(slot == null) + { + console.error("slot must be a number"); + return; + } + + if(slot.constructor !== Number) + console.warn("slot must be a number, use node.trigger('name') if you want to use a string"); + var output = this.outputs[slot]; if (!output) { return; @@ -3346,26 +3328,26 @@ * @param {Object} extra_info this can be used to have special properties of an output (label, special color, position, etc) */ LGraphNode.prototype.addOutput = function(name, type, extra_info) { - var o = { name: name, type: type, links: null }; + var output = { name: name, type: type, links: null }; if (extra_info) { for (var i in extra_info) { - o[i] = extra_info[i]; + output[i] = extra_info[i]; } } if (!this.outputs) { this.outputs = []; } - this.outputs.push(o); + this.outputs.push(output); if (this.onOutputAdded) { - this.onOutputAdded(o); + this.onOutputAdded(output); } if (LiteGraph.auto_load_slot_types) LiteGraph.registerNodeAndSlotType(this,type,true); this.setSize( this.computeSize() ); this.setDirtyCanvas(true, true); - return o; + return output; }; /** @@ -3437,10 +3419,10 @@ */ LGraphNode.prototype.addInput = function(name, type, extra_info) { type = type || 0; - var o = { name: name, type: type, link: null }; + var input = { name: name, type: type, link: null }; if (extra_info) { for (var i in extra_info) { - o[i] = extra_info[i]; + input[i] = extra_info[i]; } } @@ -3448,17 +3430,17 @@ this.inputs = []; } - this.inputs.push(o); + this.inputs.push(input); this.setSize( this.computeSize() ); if (this.onInputAdded) { - this.onInputAdded(o); + this.onInputAdded(input); } LiteGraph.registerNodeAndSlotType(this,type); this.setDirtyCanvas(true, true); - return o; + return input; }; /** @@ -5210,6 +5192,7 @@ LGraphNode.prototype.executeAction = function(action) this.allow_dragcanvas = true; this.allow_dragnodes = true; this.allow_interaction = true; //allow to control widgets, buttons, collapse, etc + this.multi_select = false; //allow selecting multi nodes without pressing extra keys this.allow_searchbox = true; this.allow_reconnect_links = true; //allows to change a connection with having to redo it again this.align_to_grid = false; //snap to grid @@ -5435,7 +5418,7 @@ LGraphNode.prototype.executeAction = function(action) }; /** - * returns the visualy active graph (in case there are more in the stack) + * returns the visually active graph (in case there are more in the stack) * @method getCurrentGraph * @return {LGraph} the active graph */ @@ -6060,9 +6043,13 @@ LGraphNode.prototype.executeAction = function(action) this.graph.beforeChange(); this.node_dragged = node; } - if (!this.selected_nodes[node.id]) { - this.processNodeSelected(node, e); - } + this.processNodeSelected(node, e); + } else { // 
double-click + /** + * Don't call the function if the block is already selected. + * Otherwise, it could cause the block to be unselected while its panel is open. + */ + if (!node.is_selected) this.processNodeSelected(node, e); } this.dirty_canvas = true; @@ -6474,6 +6461,10 @@ LGraphNode.prototype.executeAction = function(action) var n = this.selected_nodes[i]; n.pos[0] += delta[0] / this.ds.scale; n.pos[1] += delta[1] / this.ds.scale; + if (!n.is_selected) this.processNodeSelected(n, e); /* + * Don't call the function if the block is already selected. + * Otherwise, it could cause the block to be unselected while dragging. + */ } this.dirty_canvas = true; @@ -7287,7 +7278,7 @@ LGraphNode.prototype.executeAction = function(action) }; LGraphCanvas.prototype.processNodeSelected = function(node, e) { - this.selectNode(node, e && (e.shiftKey||e.ctrlKey)); + this.selectNode(node, e && (e.shiftKey || e.ctrlKey || this.multi_select)); if (this.onNodeSelected) { this.onNodeSelected(node); } @@ -7323,6 +7314,7 @@ LGraphNode.prototype.executeAction = function(action) for (var i in nodes) { var node = nodes[i]; if (node.is_selected) { + this.deselectNode(node); continue; } @@ -7489,8 +7481,8 @@ LGraphNode.prototype.executeAction = function(action) clientY_rel = e.clientY; } - e.deltaX = clientX_rel - this.last_mouse_position[0]; - e.deltaY = clientY_rel- this.last_mouse_position[1]; + // e.deltaX = clientX_rel - this.last_mouse_position[0]; + // e.deltaY = clientY_rel- this.last_mouse_position[1]; this.last_mouse_position[0] = clientX_rel; this.last_mouse_position[1] = clientY_rel; @@ -9742,13 +9734,17 @@ LGraphNode.prototype.executeAction = function(action) ctx.fillRect(margin, y, widget_width - margin * 2, H); var range = w.options.max - w.options.min; var nvalue = (w.value - w.options.min) / range; - ctx.fillStyle = active_widget == w ? "#89A" : "#678"; + if(nvalue < 0.0) nvalue = 0.0; + if(nvalue > 1.0) nvalue = 1.0; + ctx.fillStyle = w.options.hasOwnProperty("slider_color") ? w.options.slider_color : (active_widget == w ? "#89A" : "#678"); ctx.fillRect(margin, y, nvalue * (widget_width - margin * 2), H); if(show_text && !w.disabled) ctx.strokeRect(margin, y, widget_width - margin * 2, H); if (w.marker) { var marker_nvalue = (w.marker - w.options.min) / range; - ctx.fillStyle = "#AA9"; + if(marker_nvalue < 0.0) marker_nvalue = 0.0; + if(marker_nvalue > 1.0) marker_nvalue = 1.0; + ctx.fillStyle = w.options.hasOwnProperty("marker_color") ? w.options.marker_color : "#AA9"; ctx.fillRect( margin + marker_nvalue * (widget_width - margin * 2), y, 2, H ); } if (show_text) { @@ -9915,6 +9911,7 @@ LGraphNode.prototype.executeAction = function(action) case "slider": var range = w.options.max - w.options.min; var nvalue = Math.clamp((x - 15) / (widget_width - 30), 0, 1); + if(w.options.read_only) break; w.value = w.options.min + (w.options.max - w.options.min) * nvalue; if (w.callback) { setTimeout(function() { @@ -9927,7 +9924,8 @@ LGraphNode.prototype.executeAction = function(action) case "combo": var old_value = w.value; if (event.type == LiteGraph.pointerevents_method+"move" && w.type == "number") { - w.value += event.deltaX * 0.1 * (w.options.step || 1); + if(event.deltaX) + w.value += event.deltaX * 0.1 * (w.options.step || 1); if ( w.options.min != null && w.value < w.options.min ) { w.value = w.options.min; } @@ -9994,6 +9992,12 @@ LGraphNode.prototype.executeAction = function(action) var delta = x < 40 ? -1 : x > widget_width - 40 ? 
1 : 0; if (event.click_time < 200 && delta == 0) { this.prompt("Value",w.value,function(v) { + // check if v is a valid equation or a number + if (/^[0-9+\-*/()\s]+$/.test(v)) { + try {//solve the equation if possible + v = eval(v); + } catch (e) { } + } this.value = Number(v); inner_value_change(this, this.value); }.bind(w), @@ -10022,7 +10026,6 @@ LGraphNode.prototype.executeAction = function(action) case "text": if (event.type == LiteGraph.pointerevents_method+"down") { this.prompt("Value",w.value,function(v) { - this.value = v; inner_value_change(this, v); }.bind(w), event,w.options ? w.options.multiline : false ); @@ -10047,6 +10050,9 @@ LGraphNode.prototype.executeAction = function(action) }//end for function inner_value_change(widget, value) { + if(widget.type == "number"){ + value = Number(value); + } widget.value = value; if ( widget.options && widget.options.property && node.properties[widget.options.property] !== undefined ) { node.setProperty( widget.options.property, value ); @@ -11165,7 +11171,7 @@ LGraphNode.prototype.executeAction = function(action) LGraphCanvas.search_limit = -1; LGraphCanvas.prototype.showSearchBox = function(event, options) { // proposed defaults - def_options = { slot_from: null + var def_options = { slot_from: null ,node_from: null ,node_to: null ,do_type_filter: LiteGraph.search_filter_enabled // TODO check for registered_slot_[in/out]_types not empty // this will be checked for functionality enabled : filter on slot type, in and out @@ -11863,7 +11869,7 @@ LGraphNode.prototype.executeAction = function(action) // TODO refactor, theer are different dialog, some uses createDialog, some dont LGraphCanvas.prototype.createDialog = function(html, options) { - def_options = { checkForInput: false, closeOnLeave: true, closeOnLeave_checkModified: true }; + var def_options = { checkForInput: false, closeOnLeave: true, closeOnLeave_checkModified: true }; options = Object.assign(def_options, options || {}); var dialog = document.createElement("div"); @@ -11993,7 +11999,8 @@ LGraphNode.prototype.executeAction = function(action) if (root.onClose && typeof root.onClose == "function"){ root.onClose(); } - root.parentNode.removeChild(root); + if(root.parentNode) + root.parentNode.removeChild(root); /* XXX CHECK THIS */ if(this.parentNode){ this.parentNode.removeChild(this); @@ -12285,7 +12292,7 @@ LGraphNode.prototype.executeAction = function(action) var ref_window = this.getCanvasWindow(); var that = this; var graphcanvas = this; - panel = this.createPanel(node.title || "",{ + var panel = this.createPanel(node.title || "",{ closable: true ,window: ref_window ,onOpen: function(){ From 7e254d2f690ddd49d4d164b56be0e534a1fd2a2d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 15:52:56 -0400 Subject: [PATCH 61/62] Clarify what --windows-standalone-build does. 
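Editor's note, not part of the commit: this is a help-string change only, no
new behaviour. For readers wondering what consumes the flag: argparse exposes
it as args.windows_standalone_build, and the "auto opening the page" nicety
the help mentions would be gated on it roughly like this (an illustrative
sketch, not the actual main.py code; the URL assumes the default port 8188):

    import webbrowser
    from comfy.cli_args import args

    if args.windows_standalone_build:
        # Packaged-build convenience: open the UI once the server is up.
        webbrowser.open("http://127.0.0.1:8188")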
--- comfy/cli_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 739891f71..b24054ce0 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -26,6 +26,6 @@ vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for e parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") -parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.") +parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).") args = parser.parse_args() From f4e359cce1fe0e929bde617083a414961dd871b3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 16:26:06 -0400 Subject: [PATCH 62/62] Set title of page to ComfyUI. --- web/index.html | 1 + 1 file changed, 1 insertion(+) diff --git a/web/index.html b/web/index.html index 86156a7f8..bb79433ce 100644 --- a/web/index.html +++ b/web/index.html @@ -2,6 +2,7 @@ + ComfyUI
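Editor's closing note: several patches in this series (44, 56 and 57) lean on
the idiom next(os.walk(path))[1] to list only the immediate subdirectories of
a models folder while skipping files. A self-contained demonstration, stdlib
only:

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as root:
        os.makedirs(os.path.join(root, "model-a"))
        os.makedirs(os.path.join(root, "model-b"))
        open(os.path.join(root, "notes.txt"), "w").close()

        # os.walk yields (dirpath, dirnames, filenames) top-down; the first
        # tuple's dirnames are the direct children, with no recursion.
        print(sorted(next(os.walk(root))[1]))  # ['model-a', 'model-b']

The "paths += ..." aggregation in PATCH 57 then extends that list across
every configured diffusers search path instead of keeping only the last one
scanned.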