From dfd6489c9622fdf48728e336fc263df283c84903 Mon Sep 17 00:00:00 2001
From: Chris
Date: Sun, 3 Sep 2023 07:53:02 +1000
Subject: [PATCH 01/31] onExecutionStart

---
 web/scripts/app.js | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/web/scripts/app.js b/web/scripts/app.js
index 3b7483cdf..ce5e27d0c 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -994,6 +994,10 @@ export class ComfyApp {
 			api.addEventListener("execution_start", ({ detail }) => {
 				this.runningNodeId = null;
 				this.lastExecutionError = null
+				this.graph._nodes.forEach((node) => {
+					if (node.onExecutionStart)
+						node.onExecutionStart()
+				})
 			});
 
 			api.addEventListener("execution_error", ({ detail }) => {

From 4a0c4ce4ef3c1e0f2b777dcd20a8864be1420f19 Mon Sep 17 00:00:00 2001
From: Simon Lui <502929+simonlui@users.noreply.github.com>
Date: Sat, 2 Sep 2023 18:22:10 -0700
Subject: [PATCH 02/31] Some fixes to generalize CUDA specific functionality to
 Intel or other GPUs.

---
 comfy/ldm/modules/attention.py             |  3 +-
 comfy/ldm/modules/diffusionmodules/util.py | 24 ++++++++++----
 comfy/model_management.py                  | 37 ++++++++++++----------
 3 files changed, 38 insertions(+), 26 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 9fdfbd217..8f953d337 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -323,8 +323,7 @@ class CrossAttentionDoggettx(nn.Module):
                 break
             except model_management.OOM_EXCEPTION as e:
                 if first_op_done == False:
-                    torch.cuda.empty_cache()
-                    torch.cuda.ipc_collect()
+                    model_management.soft_empty_cache()
                     if cleared_cache == False:
                         cleared_cache = True
                         print("out of memory error, emptying cache and trying again")
diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py
index d890c8044..9d07d9359 100644
--- a/comfy/ldm/modules/diffusionmodules/util.py
+++ b/comfy/ldm/modules/diffusionmodules/util.py
@@ -15,6 +15,7 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat
 
+from comfy import model_management
 from comfy.ldm.util import instantiate_from_config
 import comfy.ops
 
@@ -139,13 +140,22 @@ class CheckpointFunction(torch.autograd.Function):
     @staticmethod
     def backward(ctx, *output_grads):
         ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
-        with torch.enable_grad(), \
-                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
-            # Fixes a bug where the first op in run_function modifies the
-            # Tensor storage in place, which is not allowed for detach()'d
-            # Tensors.
-            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
-            output_tensors = ctx.run_function(*shallow_copies)
+        if model_management.is_nvidia():
+            with torch.enable_grad(), \
+                    torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
+                # Fixes a bug where the first op in run_function modifies the
+                # Tensor storage in place, which is not allowed for detach()'d
+                # Tensors.
+                shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
+                output_tensors = ctx.run_function(*shallow_copies)
+        elif model_management.is_intel_xpu():
+            with torch.enable_grad(), \
+                    torch.xpu.amp.autocast(**ctx.gpu_autocast_kwargs):
+                # Fixes a bug where the first op in run_function modifies the
+                # Tensor storage in place, which is not allowed for detach()'d
+                # Tensors.
+                shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
+                output_tensors = ctx.run_function(*shallow_copies)
         input_grads = torch.autograd.grad(
             output_tensors,
             ctx.input_tensors + ctx.input_params,
diff --git a/comfy/model_management.py b/comfy/model_management.py
index aca8af999..bdbbbd843 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -58,8 +58,15 @@ except:
 if args.cpu:
     cpu_state = CPUState.CPU
 
-def get_torch_device():
+def is_intel_xpu():
+    global cpu_state
     global xpu_available
+    if cpu_state == CPUState.GPU:
+        if xpu_available:
+            return True
+    return False
+
+def get_torch_device():
     global directml_enabled
     global cpu_state
     if directml_enabled:
@@ -70,13 +77,12 @@ def get_torch_device():
     if cpu_state == CPUState.CPU:
         return torch.device("cpu")
     else:
-        if xpu_available:
+        if is_intel_xpu():
             return torch.device("xpu")
         else:
             return torch.device(torch.cuda.current_device())
 
 def get_total_memory(dev=None, torch_total_too=False):
-    global xpu_available
     global directml_enabled
     if dev is None:
         dev = get_torch_device()
@@ -88,7 +94,7 @@ def get_total_memory(dev=None, torch_total_too=False):
         if directml_enabled:
             mem_total = 1024 * 1024 * 1024 #TODO
             mem_total_torch = mem_total
-        elif xpu_available:
+        elif is_intel_xpu():
             stats = torch.xpu.memory_stats(dev)
             mem_reserved = stats['reserved_bytes.all.current']
             mem_total = torch.xpu.get_device_properties(dev).total_memory
@@ -146,11 +152,11 @@ def is_nvidia():
     if cpu_state == CPUState.GPU:
         if torch.version.cuda:
             return True
+    return False
 
 ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
 
 VAE_DTYPE = torch.float32
-
 try:
     if is_nvidia():
         torch_version = torch.version.__version__
@@ -162,6 +168,9 @@ try:
 except:
     pass
 
+if is_intel_xpu():
+    VAE_DTYPE = torch.bfloat16
+
 if args.fp16_vae:
     VAE_DTYPE = torch.float16
 elif args.bf16_vae:
@@ -220,7 +229,6 @@ if DISABLE_SMART_MEMORY:
     print("Disabling smart memory management")
 
 def get_torch_device_name(device):
-    global xpu_available
     if hasattr(device, 'type'):
         if device.type == "cuda":
             try:
@@ -230,7 +238,7 @@ def get_torch_device_name(device):
                 return "{} {} : {}".format(device, torch.cuda.get_device_name(device), allocator_backend)
         else:
             return "{}".format(device.type)
-    elif xpu_available:
+    elif is_intel_xpu():
         return "{} {}".format(device, torch.xpu.get_device_name(device))
     else:
         return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
@@ -260,7 +268,6 @@ class LoadedModel:
         return self.model_memory()
 
     def model_load(self, lowvram_model_memory=0):
-        global xpu_available
         patch_model_to = None
         if lowvram_model_memory == 0:
             patch_model_to = self.device
@@ -281,7 +288,7 @@ class LoadedModel:
             accelerate.dispatch_model(self.real_model, device_map=device_map, main_device=self.device)
             self.model_accelerated = True
 
-        if xpu_available and not args.disable_ipex_optimize:
+        if is_intel_xpu() and not args.disable_ipex_optimize:
             self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True)
 
         return self.real_model
@@ -471,12 +478,11 @@ def get_autocast_device(dev):
 
 def xformers_enabled():
-    global xpu_available
     global directml_enabled
     global cpu_state
     if cpu_state != CPUState.GPU:
         return False
-    if xpu_available:
+    if is_intel_xpu():
         return False
     if directml_enabled:
         return False
@@ -503,7 +509,6 @@ def pytorch_attention_flash_attention():
     return False
 
 def get_free_memory(dev=None, torch_free_too=False):
-    global xpu_available
     global directml_enabled
     if dev is None:
         dev = get_torch_device()
@@ -515,7 +520,7 @@ def get_free_memory(dev=None, torch_free_too=False):
         if directml_enabled:
             mem_free_total = 1024 * 1024 * 1024 #TODO
             mem_free_torch = mem_free_total
-        elif xpu_available:
+        elif is_intel_xpu():
             stats = torch.xpu.memory_stats(dev)
             mem_active = stats['active_bytes.all.current']
             mem_allocated = stats['allocated_bytes.all.current']
@@ -577,7 +582,6 @@ def is_device_mps(device):
     return False
 
 def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
-    global xpu_available
     global directml_enabled
 
     if device is not None:
@@ -600,7 +604,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
     if cpu_mode() or mps_mode():
         return False #TODO ?
 
-    if xpu_available:
+    if is_intel_xpu():
         return True
 
     if torch.cuda.is_bf16_supported():
@@ -636,11 +640,10 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
 
     return True
 
 def soft_empty_cache():
-    global xpu_available
     global cpu_state
     if cpu_state == CPUState.MPS:
         torch.mps.empty_cache()
-    elif xpu_available:
+    elif is_intel_xpu():
         torch.xpu.empty_cache()
     elif torch.cuda.is_available():
         if is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda

From 2da73b7073dc520ee480dee8ff911b9aa83ff70a Mon Sep 17 00:00:00 2001
From: Simon Lui <502929+simonlui@users.noreply.github.com>
Date: Sat, 2 Sep 2023 20:07:52 -0700
Subject: [PATCH 03/31] Revert changes in comfy/ldm/modules/diffusionmodules/util.py,
 which is unused.

---
 comfy/ldm/modules/diffusionmodules/util.py | 24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py
index 9d07d9359..d890c8044 100644
--- a/comfy/ldm/modules/diffusionmodules/util.py
+++ b/comfy/ldm/modules/diffusionmodules/util.py
@@ -15,7 +15,6 @@ import torch.nn as nn
 import numpy as np
 from einops import repeat
 
-from comfy import model_management
 from comfy.ldm.util import instantiate_from_config
 import comfy.ops
 
@@ -140,22 +139,13 @@ class CheckpointFunction(torch.autograd.Function):
     @staticmethod
     def backward(ctx, *output_grads):
         ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
-        if model_management.is_nvidia():
-            with torch.enable_grad(), \
-                    torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
-                # Fixes a bug where the first op in run_function modifies the
-                # Tensor storage in place, which is not allowed for detach()'d
-                # Tensors.
-                shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
-                output_tensors = ctx.run_function(*shallow_copies)
-        elif model_management.is_intel_xpu():
-            with torch.enable_grad(), \
-                    torch.xpu.amp.autocast(**ctx.gpu_autocast_kwargs):
-                # Fixes a bug where the first op in run_function modifies the
-                # Tensor storage in place, which is not allowed for detach()'d
-                # Tensors.
+        with torch.enable_grad(), \
+                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
+            # Fixes a bug where the first op in run_function modifies the
+            # Tensor storage in place, which is not allowed for detach()'d
+            # Tensors.
+            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
+            output_tensors = ctx.run_function(*shallow_copies)
         input_grads = torch.autograd.grad(
             output_tensors,
             ctx.input_tensors + ctx.input_params,

From 6f70227b8cbc61d5e228857aed991a8ae1ef1a33 Mon Sep 17 00:00:00 2001
From: Michael Abrahams
Date: Sun, 3 Sep 2023 11:51:50 -0400
Subject: [PATCH 04/31] Add support for pasting images into the graph

It can be useful to paste images from the clipboard directly into the
node graph. This commit modifies copy and paste handling to support
this.

When an image file is found in the clipboard, we check whether an image
node is selected. If so, paste the image into that node. Otherwise, a
new node is created. If no image data are found in the clipboard, we
call the original Litegraph paste. To ensure that onCopy and onPaste
events are fired, we override Litegraph's ctrl+c and ctrl+v handling.

Try to detect whether the pasted image is a real file on disk, or just
pixel data copied from e.g. Photoshop. Pasted pixel data will be called
'image.png' and have a creation time of now. If it is simply pasted
data, we store it in the subfolder /input/pasted/.

This also adds support for the subfolder property in the IMAGEUPLOAD
widget.
---
 web/scripts/app.js     | 93 ++++++++++++++++++++++++++++++++++------
 web/scripts/widgets.js | 29 +++++++++----
 2 files changed, 102 insertions(+), 20 deletions(-)

diff --git a/web/scripts/app.js b/web/scripts/app.js
index 3b7483cdf..b5114604a 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -667,11 +667,40 @@ export class ComfyApp {
 	}
 
 	/**
-	 * Adds a handler on paste that extracts and loads workflows from pasted JSON data
+	 * Adds a handler on paste that extracts and loads images or workflows from pasted JSON data
	 */
 	#addPasteHandler() {
 		document.addEventListener("paste", (e) => {
-			let data = (e.clipboardData || window.clipboardData).getData("text/plain");
+			let data = (e.clipboardData || window.clipboardData);
+			const items = data.items;
+
+			// Look for image paste data
+			for (const item of items) {
+				if (item.type.startsWith('image/')) {
+					var imageNode = null;
+
+					// If an image node is selected, paste into it
+					if (this.canvas.current_node &&
+						this.canvas.current_node.is_selected &&
+						ComfyApp.isImageNode(this.canvas.current_node)) {
+						imageNode = this.canvas.current_node;
+					}
+
+					// No image node selected: add a new one
+					if (!imageNode) {
+						const newNode = LiteGraph.createNode("LoadImage");
+						newNode.pos = [...this.canvas.graph_mouse];
+						imageNode = this.graph.add(newNode);
+						this.graph.change();
+					}
+					const blob = item.getAsFile();
+					imageNode.pasteFile(blob);
+					return;
+				}
+			}
+
+			// No image found. Look for node data
+			data = data.getData("text/plain");
 			let workflow;
 			try {
 				data = data.slice(data.indexOf("{"));
@@ -687,9 +716,29 @@ export class ComfyApp {
 			if (workflow && workflow.version && workflow.nodes && workflow.extra) {
 				this.loadGraphData(workflow);
 			}
+			else {
+				// Litegraph default paste
+				this.canvas.pasteFromClipboard();
+			}
+
 		});
 	}
 
+	/**
+	 * Adds a handler on copy that serializes selected nodes to JSON
+	 */
+	#addCopyHandler() {
+		document.addEventListener("copy", (e) => {
+			// copy
+			if (this.canvas.selected_nodes) {
+				this.canvas.copyToClipboard();
+			}
+		});
+	}
+
 	/**
 	 * Handle mouse
 	 *
@@ -745,12 +794,6 @@ export class ComfyApp {
 		const self = this;
 		const origProcessKey = LGraphCanvas.prototype.processKey;
 		LGraphCanvas.prototype.processKey = function(e) {
-			const res = origProcessKey.apply(this, arguments);
-
-			if (res === false) {
-				return res;
-			}
-
 			if (!this.graph) {
 				return;
 			}
@@ -761,9 +804,10 @@ export class ComfyApp {
 				return;
 			}
 
-			if (e.type == "keydown") {
+			if (e.type == "keydown" && !e.repeat) {
+
 				// Ctrl + M mute/unmute
-				if (e.keyCode == 77 && e.ctrlKey) {
+				if (e.key === 'm' && e.ctrlKey) {
 					if (this.selected_nodes) {
 						for (var i in this.selected_nodes) {
 							if (this.selected_nodes[i].mode === 2) { // never
@@ -776,7 +820,8 @@ export class ComfyApp {
 					block_default = true;
 				}
 
-				if (e.keyCode == 66 && e.ctrlKey) {
+				// Ctrl + B bypass
+				if (e.key === 'b' && e.ctrlKey) {
 					if (this.selected_nodes) {
 						for (var i in this.selected_nodes) {
 							if (this.selected_nodes[i].mode === 4) { // never
@@ -788,6 +833,28 @@ export class ComfyApp {
 					}
 					block_default = true;
 				}
+
+				// Ctrl+C Copy
+				if ((e.key === 'c') && (e.metaKey || e.ctrlKey)) {
+					if (e.shiftKey) {
+						this.copyToClipboard(true);
+						block_default = true;
+					}
+					// Trigger default onCopy
+					return true;
+				}
+
+				// Ctrl+V Paste
+				if ((e.key === 'v') && (e.metaKey || e.ctrlKey)) {
+					if (e.shiftKey) {
+						this.pasteFromClipboard(true);
+						block_default = true;
+					}
+					else {
+						// Trigger default onPaste
+						return true;
+					}
+				}
 			}
 
 			this.graph.change();
@@ -798,7 +865,8 @@ export class ComfyApp {
 				return false;
 			}
 
-			return res;
+			// Fall through to Litegraph defaults
+			return origProcessKey.apply(this, arguments);
 		};
 	}
 
@@ -1110,6 +1178,7 @@ export class ComfyApp {
 		this.#addDrawGroupsHandler();
 		this.#addApiUpdateHandlers();
 		this.#addDropHandler();
+		this.#addCopyHandler();
 		this.#addPasteHandler();
 		this.#addKeyboardHandler();
 
diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 5a4644b13..45ac9b896 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -76,7 +76,7 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random
 			targetWidget.value = max;
 		}
 	}
-    return valueControl;
+	return valueControl;
 };
 
 function seedWidget(node, inputName, inputData, app) {
@@ -387,11 +387,12 @@ export const ComfyWidgets = {
 			}
 		});
 
-		async function uploadFile(file, updateNode) {
+		async function uploadFile(file, updateNode, pasted = false) {
 			try {
 				// Wrap file in formdata so it includes filename
 				const body = new FormData();
 				body.append("image", file);
+				if (pasted) body.append("subfolder", "pasted");
 				const resp = await api.fetchApi("/upload/image", {
 					method: "POST",
 					body,
@@ -399,15 +400,17 @@ export const ComfyWidgets = {
 
 				if (resp.status === 200) {
 					const data = await resp.json();
-					// Add the file as an option and update the widget value
-					if (!imageWidget.options.values.includes(data.name)) {
-						imageWidget.options.values.push(data.name);
+					// Add the file to the dropdown list and update the widget value
+					let path = data.name;
+					if (data.subfolder) path = data.subfolder + "/" + path;
+
+					if (!imageWidget.options.values.includes(path)) {
+						imageWidget.options.values.push(path);
 					}
 
 					if (updateNode) {
-						showImage(data.name);
-
-						imageWidget.value = data.name;
+						showImage(path);
+						imageWidget.value = path;
 					}
 				} else {
 					alert(resp.status + " - " + resp.statusText);
@@ -460,6 +463,16 @@ export const ComfyWidgets = {
 			return handled;
 		};
 
+		node.pasteFile = function(file) {
+			if (file.type.startsWith("image/")) {
+				const is_pasted = (file.name === "image.png") &&
+					(file.lastModified - Date.now() < 2000);
+				uploadFile(file, true, is_pasted);
+				return true;
+			}
+			return false;
+		}
+
 		return { widget: uploadWidget };
 	},
 };

From 1938f5c5fe479996802c46d5c2233887e3598a40 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 4 Sep 2023 00:58:18 -0400
Subject: [PATCH 05/31] Add a force argument to soft_empty_cache to force a
 cache empty.

---
 comfy/ldm/modules/attention.py              | 2 +-
 comfy/ldm/modules/diffusionmodules/model.py | 1 +
 comfy/model_management.py                   | 4 ++--
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 8f953d337..34484b288 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -323,7 +323,7 @@ class CrossAttentionDoggettx(nn.Module):
                 break
             except model_management.OOM_EXCEPTION as e:
                 if first_op_done == False:
-                    model_management.soft_empty_cache()
+                    model_management.soft_empty_cache(True)
                     if cleared_cache == False:
                         cleared_cache = True
                         print("out of memory error, emptying cache and trying again")
diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 431548483..5f38640c3 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -186,6 +186,7 @@ def slice_attention(q, k, v):
                 del s2
             break
         except model_management.OOM_EXCEPTION as e:
+            model_management.soft_empty_cache(True)
             steps *= 2
             if steps > 128:
                 raise e
diff --git a/comfy/model_management.py b/comfy/model_management.py
index bdbbbd843..b663e8f59 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -639,14 +639,14 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
 
     return True
 
-def soft_empty_cache():
+def soft_empty_cache(force=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
         torch.mps.empty_cache()
     elif is_intel_xpu():
         torch.xpu.empty_cache()
     elif torch.cuda.is_available():
-        if is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
+        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()

From d19684707922af9c2399307c8d9bccc1b267cc3b Mon Sep 17 00:00:00 2001
From: Michael Poutre
Date: Wed, 23 Aug 2023 16:37:31 -0700
Subject: [PATCH 06/31] feat: Add support for excluded_dirs to
 folder_paths.recursive_search

Refactored variable names to better match what they represent
---
 folder_paths.py | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/folder_paths.py b/folder_paths.py
index e321690dd..16de1bb66 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -121,18 +121,25 @@ def add_model_folder_path(folder_name, full_folder_path):
 def get_folder_paths(folder_name):
     return folder_names_and_paths[folder_name][0][:]
 
-def recursive_search(directory):
+def recursive_search(directory, excluded_dir_names=None):
     if not os.path.isdir(directory):
         return [], {}
+
+    if excluded_dir_names is None:
+        excluded_dir_names = []
+
     result = []
     dirs = {directory: os.path.getmtime(directory)}
-    for root, subdir, file in os.walk(directory, followlinks=True):
-        for filepath in file:
-            #we os.path,join directory with a blank string to generate a path separator at the end.
-            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
-        for d in subdir:
-            path = os.path.join(root, d)
+    for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
+        print("Checking directory: " + dirpath)
+        subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
+        for file_name in filenames:
+            relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
+            result.append(relative_path)
+        for d in subdirs:
+            path = os.path.join(dirpath, d)
             dirs[path] = os.path.getmtime(path)
+    print("Returning from recursive_search" + repr(result))
     return result, dirs
 
 def filter_files_extensions(files, extensions):

From 3e00fa433216335a874bd408de421f9d65432daf Mon Sep 17 00:00:00 2001
From: Michael Poutre
Date: Wed, 23 Aug 2023 16:50:41 -0700
Subject: [PATCH 07/31] feat: Exclude .git when retrieving filename lists

In the future could support user provided excluded dirs via config file
---
 folder_paths.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/folder_paths.py b/folder_paths.py
index 16de1bb66..a18052856 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -166,7 +166,7 @@ def get_filename_list_(folder_name):
     folders = folder_names_and_paths[folder_name]
     output_folders = {}
     for x in folders[0]:
-        files, folders_all = recursive_search(x)
+        files, folders_all = recursive_search(x, excluded_dir_names=[".git"])
         output_list.update(filter_files_extensions(files, folders[1]))
         output_folders = {**output_folders, **folders_all}

From f368e5ac7d31649b22c0c1e44bc9fa8002fcb117 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 5 Sep 2023 01:22:03 -0400
Subject: [PATCH 08/31] Don't paste nodes when target is a textarea or a text
 box.
---
 web/scripts/app.js | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/web/scripts/app.js b/web/scripts/app.js
index 7f5073573..9c380d3fb 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -717,8 +717,12 @@ export class ComfyApp {
 				this.loadGraphData(workflow);
 			}
 			else {
+				if (e.target.type === "text" || e.target.type === "textarea") {
+					return;
+				}
+
 				// Litegraph default paste
-				this.canvas.pasteFromClipboard();
+				this.canvas.pasteFromClipboard();
 			}
 

From bc1f6e21856f7be25db5c5c2956b89c27db93b3d Mon Sep 17 00:00:00 2001
From: Michael Poutre
Date: Tue, 5 Sep 2023 15:06:46 -0700
Subject: [PATCH 09/31] fix(ui/widgets): Only set widget forceInput option if a
 widget is added

---
 web/scripts/app.js | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/web/scripts/app.js b/web/scripts/app.js
index 9c380d3fb..a3661da64 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -1228,6 +1228,7 @@ export class ComfyApp {
 					const inputData = inputs[inputName];
 					const type = inputData[0];
 
+					let widgetCreated = true;
 					if (Array.isArray(type)) {
 						// Enums
 						Object.assign(config, widgets.COMBO(this, inputName, inputData, app) || {});
@@ -1240,8 +1241,10 @@ export class ComfyApp {
 					} else {
 						// Node connection inputs
 						this.addInput(inputName, type);
+						widgetCreated = false;
 					}
-					if(inputData[1]?.forceInput && config?.widget) {
+
+					if(widgetCreated && inputData[1]?.forceInput && config?.widget) {
 						if (!config.widget.options) config.widget.options = {};
 						config.widget.options.forceInput = inputData[1].forceInput;
 					}

From 21a563d385ff520e1f7fdaada722212b35fb8d95 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 5 Sep 2023 23:46:37 -0400
Subject: [PATCH 10/31] Remove prints.

---
 folder_paths.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/folder_paths.py b/folder_paths.py
index a18052856..82aedd43f 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -131,7 +131,6 @@ def recursive_search(directory, excluded_dir_names=None):
     result = []
     dirs = {directory: os.path.getmtime(directory)}
     for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
-        print("Checking directory: " + dirpath)
         subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
         for file_name in filenames:
             relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
@@ -139,7 +138,6 @@ def recursive_search(directory, excluded_dir_names=None):
         for d in subdirs:
             path = os.path.join(dirpath, d)
             dirs[path] = os.path.getmtime(path)
-    print("Returning from recursive_search" + repr(result))
     return result, dirs
 
 def filter_files_extensions(files, extensions):

From f88f7f413afbe04b42c4422e9deedbaa3269ce76 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 6 Sep 2023 03:26:55 -0400
Subject: [PATCH 11/31] Add a ConditioningSetAreaPercentage node.

---
 comfy/samplers.py | 15 ++++++++++++---
 nodes.py          | 27 +++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index 103ac33ff..3250b2edc 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -390,11 +390,20 @@ def get_mask_aabb(masks):
 
     return bounding_boxes, is_empty
 
-def resolve_cond_masks(conditions, h, w, device):
+def resolve_areas_and_cond_masks(conditions, h, w, device):
     # We need to decide on an area outside the sampling loop in order to properly generate opposite areas of equal sizes.
     # While we're doing this, we can also resolve the mask device and scaling for performance reasons
     for i in range(len(conditions)):
         c = conditions[i]
+        if 'area' in c[1]:
+            area = c[1]['area']
+            if area[0] == "percentage":
+                modified = c[1].copy()
+                area = (max(1, round(area[1] * h)), max(1, round(area[2] * w)), round(area[3] * h), round(area[4] * w))
+                modified['area'] = area
+                c = [c[0], modified]
+                conditions[i] = c
+
         if 'mask' in c[1]:
             mask = c[1]['mask']
             mask = mask.to(device=device)
@@ -622,8 +631,8 @@ class KSampler:
         positive = positive[:]
         negative = negative[:]
 
-        resolve_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
-        resolve_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
+        resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
+        resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
 
         calculate_start_end_timesteps(self.model_wrap, negative)
         calculate_start_end_timesteps(self.model_wrap, positive)
diff --git a/nodes.py b/nodes.py
index fa26e5939..77d180526 100644
--- a/nodes.py
+++ b/nodes.py
@@ -159,6 +159,31 @@ class ConditioningSetArea:
             c.append(n)
         return (c, )
 
+class ConditioningSetAreaPercentage:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning": ("CONDITIONING", ),
+                             "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "append"
+
+    CATEGORY = "conditioning"
+
+    def append(self, conditioning, width, height, x, y, strength):
+        c = []
+        for t in conditioning:
+            n = [t[0], t[1].copy()]
+            n[1]['area'] = ("percentage", height, width, y, x)
+            n[1]['strength'] = strength
+            n[1]['set_area_to_bounds'] = False
+            c.append(n)
+        return (c, )
+
 class ConditioningSetMask:
     @classmethod
     def INPUT_TYPES(s):
@@ -1583,6 +1608,7 @@ NODE_CLASS_MAPPINGS = {
     "ConditioningCombine": ConditioningCombine,
     "ConditioningConcat": ConditioningConcat,
     "ConditioningSetArea": ConditioningSetArea,
+    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
     "ConditioningSetMask": ConditioningSetMask,
     "KSamplerAdvanced": KSamplerAdvanced,
     "SetLatentNoiseMask": SetLatentNoiseMask,
@@ -1644,6 +1670,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "ConditioningAverage ": "Conditioning (Average)",
     "ConditioningConcat": "Conditioning (Concat)",
     "ConditioningSetArea": "Conditioning (Set Area)",
+    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
     "ConditioningSetMask": "Conditioning (Set Mask)",
     "ControlNetApply": "Apply ControlNet",
     "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",

From cb080e771e1e792e18611ef63d2d6a49aa50a524 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 6 Sep 2023 16:18:02 -0400
Subject: [PATCH 12/31] Lower refresh timeout for search in litegraph.
---
 web/lib/litegraph.core.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js
index 4bb2f0d99..4a21a1b34 100644
--- a/web/lib/litegraph.core.js
+++ b/web/lib/litegraph.core.js
@@ -11529,7 +11529,7 @@ LGraphNode.prototype.executeAction = function(action)
                     if (timeout) {
                         clearInterval(timeout);
                     }
-                    timeout = setTimeout(refreshHelper, 250);
+                    timeout = setTimeout(refreshHelper, 10);
                     return;
                 }
                 e.preventDefault();

From adb9eb94b0d825bb904d449cf259e7da66453a17 Mon Sep 17 00:00:00 2001
From: Chris
Date: Thu, 7 Sep 2023 12:10:52 +1000
Subject: [PATCH 13/31] Send class description if any

---
 server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server.py b/server.py
index 57d5a65df..e84e698d6 100644
--- a/server.py
+++ b/server.py
@@ -398,7 +398,7 @@ class PromptServer():
         info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']
         info['name'] = node_class
         info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[node_class] if node_class in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else node_class
-        info['description'] = ''
+        info['description'] = obj_class.DESCRIPTION if hasattr(node_class,'DESCRIPTION') else ''
         info['category'] = 'sd'
         if hasattr(obj_class, 'OUTPUT_NODE') and obj_class.OUTPUT_NODE == True:
             info['output_node'] = True

From 694c705f5225be458ce3cf1db34531c17925e20d Mon Sep 17 00:00:00 2001
From: Chris
Date: Thu, 7 Sep 2023 12:20:37 +1000
Subject: [PATCH 14/31] get class description

---
 server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server.py b/server.py
index e84e698d6..2ebf9e235 100644
--- a/server.py
+++ b/server.py
@@ -398,7 +398,7 @@ class PromptServer():
         info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']
         info['name'] = node_class
         info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[node_class] if node_class in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else node_class
-        info['description'] = obj_class.DESCRIPTION if hasattr(node_class,'DESCRIPTION') else ''
+        info['description'] = obj_class.DESCRIPTION if hasattr(obj_class,'DESCRIPTION') else ''
         info['category'] = 'sd'
         if hasattr(obj_class, 'OUTPUT_NODE') and obj_class.OUTPUT_NODE == True:
             info['output_node'] = True

From 8be46438be1c848e01e4085f54ae997e2e918771 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 7 Sep 2023 03:31:43 -0400
Subject: [PATCH 15/31] Support DiffBIR SwinIR models.
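
DiffBIR checkpoints wrap their SwinIR weights in a "module." key prefix
and fold an initial pixel_unshuffle into conv_first, so the loader
strips that prefix before handing the state dict to the model. As a
rough illustration of the stripping step only (a simplified sketch, not
the actual comfy.utils.state_dict_prefix_replace implementation):

    # Simplified sketch: rewrite "module."-prefixed keys in a checkpoint.
    def strip_prefix(state_dict, prefix="module."):
        out = {}
        for k, v in state_dict.items():
            out[k[len(prefix):] if k.startswith(prefix) else k] = v
        return out

    sd = {"module.conv_first.weight": 0, "module.conv_first.bias": 1}
    sd = strip_prefix(sd)  # -> {"conv_first.weight": 0, "conv_first.bias": 1}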
---
 .../chainner_models/architecture/SwinIR.py | 17 ++++++++++++++++-
 comfy_extras/nodes_upscale_model.py        |  2 ++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/comfy_extras/chainner_models/architecture/SwinIR.py b/comfy_extras/chainner_models/architecture/SwinIR.py
index 1abf450bb..439dcbcb2 100644
--- a/comfy_extras/chainner_models/architecture/SwinIR.py
+++ b/comfy_extras/chainner_models/architecture/SwinIR.py
@@ -846,6 +846,7 @@ class SwinIR(nn.Module):
         num_in_ch = in_chans
         num_out_ch = in_chans
         supports_fp16 = True
+        self.start_unshuffle = 1
 
         self.model_arch = "SwinIR"
         self.sub_type = "SR"
@@ -874,6 +875,11 @@ class SwinIR(nn.Module):
             else 64
         )
 
+        if "conv_first.1.weight" in self.state:
+            self.state["conv_first.weight"] = self.state.pop("conv_first.1.weight")
+            self.state["conv_first.bias"] = self.state.pop("conv_first.1.bias")
+            self.start_unshuffle = round(math.sqrt(self.state["conv_first.weight"].shape[1] // 3))
+
         num_in_ch = self.state["conv_first.weight"].shape[1]
         in_chans = num_in_ch
         if "conv_last.weight" in state_keys:
@@ -968,7 +974,7 @@ class SwinIR(nn.Module):
         self.depths = depths
         self.window_size = window_size
         self.mlp_ratio = mlp_ratio
-        self.scale = upscale
+        self.scale = upscale / self.start_unshuffle
         self.upsampler = upsampler
         self.img_size = img_size
         self.img_range = img_range
@@ -1101,6 +1107,9 @@ class SwinIR(nn.Module):
             self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             if self.upscale == 4:
                 self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+            elif self.upscale == 8:
+                self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+                self.conv_up3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
             self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
@@ -1157,6 +1166,9 @@ class SwinIR(nn.Module):
         self.mean = self.mean.type_as(x)
         x = (x - self.mean) * self.img_range
 
+        if self.start_unshuffle > 1:
+            x = torch.nn.functional.pixel_unshuffle(x, self.start_unshuffle)
+
         if self.upsampler == "pixelshuffle":
             # for classical SR
             x = self.conv_first(x)
@@ -1186,6 +1198,9 @@ class SwinIR(nn.Module):
                         )
                     )
                 )
+            elif self.upscale == 8:
+                x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
+                x = self.lrelu(self.conv_up3(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
             x = self.conv_last(self.lrelu(self.conv_hr(x)))
         else:
             # for image denoising and JPEG compression artifact reduction
diff --git a/comfy_extras/nodes_upscale_model.py b/comfy_extras/nodes_upscale_model.py
index abd182e6e..2b5e49a55 100644
--- a/comfy_extras/nodes_upscale_model.py
+++ b/comfy_extras/nodes_upscale_model.py
@@ -18,6 +18,8 @@ class UpscaleModelLoader:
     def load_model(self, model_name):
         model_path = folder_paths.get_full_path("upscale_models", model_name)
         sd = comfy.utils.load_torch_file(model_path, safe_load=True)
+        if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
+            sd = comfy.utils.state_dict_prefix_replace(sd, {"module.":""})
         out = model_loading.load_state_dict(sd).eval()
         return (out, )

From 62799c858575a2a69e671560efa7eb8001e0e275 Mon Sep 17 00:00:00 2001
From: pythongosssss <125205205+pythongosssss@users.noreply.github.com>
Date: Thu, 7 Sep 2023 18:42:21 +0100
Subject: [PATCH 16/31] fix crash on node with VALIDATE_INPUTS and actual
 inputs

---
 execution.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/execution.py b/execution.py
index e10fdbb60..5f5d6c738 100644
--- a/execution.py
+++ b/execution.py
@@ -21,7 +21,8 @@ def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_da
             input_unique_id = input_data[0]
             output_index = input_data[1]
             if input_unique_id not in outputs:
-                return None
+                input_data_all[x] = (None,)
+                continue
             obj = outputs[input_unique_id][output_index]
             input_data_all[x] = obj
         else:

From d6d1a8998fa60da9265ea3e9db35d80441cac6fd Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 7 Sep 2023 18:06:22 -0400
Subject: [PATCH 17/31] Properly check upload filename for directory traversal.

---
 server.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server.py b/server.py
index 2ebf9e235..e58a11d86 100644
--- a/server.py
+++ b/server.py
@@ -170,15 +170,15 @@ class PromptServer():
             subfolder = post.get("subfolder", "")
             full_output_folder = os.path.join(upload_dir, os.path.normpath(subfolder))
+            filepath = os.path.join(full_output_folder, filename)
 
-            if os.path.commonpath((upload_dir, os.path.abspath(full_output_folder))) != upload_dir:
+            if os.path.commonpath((upload_dir, os.path.abspath(filepath))) != upload_dir:
                 return web.Response(status=400)
 
             if not os.path.exists(full_output_folder):
                 os.makedirs(full_output_folder)
 
             split = os.path.splitext(filename)
-            filepath = os.path.join(full_output_folder, filename)
 
             if overwrite is not None and (overwrite == "true" or overwrite == "1"):
                 pass

From 9261587d8975bb0c3f929433345e9918bf659460 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 7 Sep 2023 18:14:30 -0400
Subject: [PATCH 18/31] Small refactor.

---
 server.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server.py b/server.py
index e58a11d86..be33f4100 100644
--- a/server.py
+++ b/server.py
@@ -170,9 +170,9 @@ class PromptServer():
             subfolder = post.get("subfolder", "")
             full_output_folder = os.path.join(upload_dir, os.path.normpath(subfolder))
-            filepath = os.path.join(full_output_folder, filename)
+            filepath = os.path.abspath(os.path.join(full_output_folder, filename))
 
-            if os.path.commonpath((upload_dir, os.path.abspath(filepath))) != upload_dir:
+            if os.path.commonpath((upload_dir, filepath)) != upload_dir:
                 return web.Response(status=400)
 
             if not os.path.exists(full_output_folder):

From 326577d04c99590cbf91324f507fdc2c7d37832d Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 7 Sep 2023 23:37:03 -0400
Subject: [PATCH 19/31] Allow cancelling of everything with a progress bar.
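
The interrupt check moves out of the sampling loop and into the
progress hook, so anything that reports progress becomes a cancellation
point. A minimal sketch of the pattern (the flag and exception below
are illustrative stand-ins, not the exact comfy.model_management
internals):

    # Sketch: every progress tick doubles as a cancellation point.
    class InterruptProcessingException(Exception):
        pass

    interrupt_requested = False

    def throw_exception_if_processing_interrupted():
        global interrupt_requested
        if interrupt_requested:
            interrupt_requested = False
            raise InterruptProcessingException()

    def progress_hook(value, total):
        throw_exception_if_processing_interrupted()
        print("progress {}/{}".format(value, total))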
---
 comfy/samplers.py | 2 --
 main.py           | 1 +
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index 3250b2edc..c60288fd1 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -263,8 +263,6 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
             output = model_function(input_x, timestep_, **c).chunk(batch_chunks)
             del input_x
 
-            model_management.throw_exception_if_processing_interrupted()
-
             for o in range(batch_chunks):
                 if cond_or_uncond[o] == COND:
                     out_cond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o]
diff --git a/main.py b/main.py
index a4038db4b..9f0f80458 100644
--- a/main.py
+++ b/main.py
@@ -104,6 +104,7 @@ async def run(server, address='', port=8188, verbose=True, call_on_start=None):
 
 def hijack_progress(server):
     def hook(value, total, preview_image):
+        comfy.model_management.throw_exception_if_processing_interrupted()
        server.send_sync("progress", {"value": value, "max": total}, server.client_id)
         if preview_image is not None:
             server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)

From 0782ac2a96fab2c436f78379db1de0df9737aa1d Mon Sep 17 00:00:00 2001
From: Chris
Date: Fri, 8 Sep 2023 14:53:29 +1000
Subject: [PATCH 20/31] defaultInput

---
 web/extensions/core/widgetInputs.js | 2 +-
 web/scripts/app.js                  | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js
index f9a5b7278..606605f0a 100644
--- a/web/extensions/core/widgetInputs.js
+++ b/web/extensions/core/widgetInputs.js
@@ -142,7 +142,7 @@ app.registerExtension({
 			const r = origOnNodeCreated ? origOnNodeCreated.apply(this) : undefined;
 			if (this.widgets) {
 				for (const w of this.widgets) {
-					if (w?.options?.forceInput) {
+					if (w?.options?.forceInput || w?.options?.defaultInput) {
 						const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}];
 						convertToInput(this, w, config);
 					}
diff --git a/web/scripts/app.js b/web/scripts/app.js
index a3661da64..40295b350 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -1248,6 +1248,10 @@ export class ComfyApp {
 						if (!config.widget.options) config.widget.options = {};
 						config.widget.options.forceInput = inputData[1].forceInput;
 					}
+					if(widgetCreated && inputData[1]?.defaultInput && config?.widget) {
+						if (!config.widget.options) config.widget.options = {};
+						config.widget.options.defaultInput = inputData[1].defaultInput;
+					}
 				}
 
 				for (const o in nodeData["output"]) {

From ff962098fdf4486fa5117e268187af964aaf586d Mon Sep 17 00:00:00 2001
From: MoonRide303
Date: Fri, 8 Sep 2023 08:43:17 +0200
Subject: [PATCH 21/31] Fixed Load Image preview not displaying some files
 (issue #1158)

---
 web/scripts/widgets.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 45ac9b896..975577631 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -335,7 +335,7 @@ export const ComfyWidgets = {
 				subfolder = name.substring(0, folder_separator);
 				name = name.substring(folder_separator + 1);
 			}
-			img.src = api.apiURL(`/view?filename=${name}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`);
+			img.src = api.apiURL(`/view?filename=${encodeURIComponent(name)}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`);
 			node.setSizeForImage?.();
 		}
 

From 3ebe6b539a510c59004a6bb3b4cdb833a5612431 Mon Sep 17 00:00:00 2001
From: Chris
Date: Fri, 8 Sep 2023 20:37:55 +1000
Subject: [PATCH 22/31] round float widgets (by default to 0.001)

---
 web/scripts/widgets.js | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 45ac9b896..68862a68c 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -2,14 +2,15 @@ import { api } from "./api.js"
 
 function getNumberDefaults(inputData, defaultStep) {
 	let defaultVal = inputData[1]["default"];
-	let { min, max, step } = inputData[1];
+	let { min, max, step, round } = inputData[1];
 
 	if (defaultVal == undefined) defaultVal = 0;
 	if (min == undefined) min = 0;
 	if (max == undefined) max = 2048;
 	if (step == undefined) step = defaultStep;
+	if (round == undefined) round = 0.001;
 
-	return { val: defaultVal, config: { min, max, step: 10.0 * step } };
+	return { val: defaultVal, config: { min, max, step: 10.0 * step, round } };
 }
 
 export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) {
@@ -264,7 +265,10 @@ export const ComfyWidgets = {
 	FLOAT(node, inputName, inputData, app) {
 		let widgetType = isSlider(inputData[1]["display"], app);
 		const { val, config } = getNumberDefaults(inputData, 0.5);
-		return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) };
+		return { widget: node.addWidget(widgetType, inputName, val,
+			function (v) {
+				this.value = Math.round(v/config.round)*config.round;
+			}, config) };
 	},
 	INT(node, inputName, inputData, app) {
 		let widgetType = isSlider(inputData[1]["display"], app);

From 1e6b67101cad777319e891afce3c7120e0dc1273 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 8 Sep 2023 11:36:51 -0400
Subject: [PATCH 23/31] Support diffusers format t2i adapters.

---
 comfy/controlnet.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index 490be6bbc..af0df103e 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -449,10 +449,18 @@ class T2IAdapter(ControlBase):
         return c
 
 def load_t2i_adapter(t2i_data):
-    keys = t2i_data.keys()
-    if 'adapter' in keys:
+    if 'adapter' in t2i_data:
         t2i_data = t2i_data['adapter']
-        keys = t2i_data.keys()
+    if 'adapter.body.0.resnets.0.block1.weight' in t2i_data: #diffusers format
+        prefix_replace = {}
+        for i in range(4):
+            for j in range(2):
+                prefix_replace["adapter.body.{}.resnets.{}.".format(i, j)] = "body.{}.".format(i * 2 + j)
+            prefix_replace["adapter.body.{}.".format(i, j)] = "body.{}.".format(i * 2)
+        prefix_replace["adapter."] = ""
+        t2i_data = comfy.utils.state_dict_prefix_replace(t2i_data, prefix_replace)
+    keys = t2i_data.keys()
+
     if "body.0.in_conv.weight" in keys:
         cin = t2i_data['body.0.in_conv.weight'].shape[1]
         model_ad = comfy.t2i_adapter.adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)

From 264867bf87c37abdf794c9e1bab1bc512c2f5ff4 Mon Sep 17 00:00:00 2001
From: Michael Abrahams
Date: Fri, 8 Sep 2023 11:17:45 -0400
Subject: [PATCH 24/31] Clear clipboard on copy

---
 web/scripts/app.js | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/web/scripts/app.js b/web/scripts/app.js
index a3661da64..72844a92b 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -735,9 +735,17 @@ export class ComfyApp {
 	 */
 	#addCopyHandler() {
 		document.addEventListener("copy", (e) => {
-			// copy
+			if (e.target.type === "text" || e.target.type === "textarea") {
+				// Default system copy
+				return;
+			}
+			// copy nodes and clear clipboard
 			if (this.canvas.selected_nodes) {
-				this.canvas.copyToClipboard();
+				this.canvas.copyToClipboard();
+				e.clipboardData.clearData();
+				e.preventDefault();
+				e.stopImmediatePropagation();
+				return false;
 			}
 		});
 	}
@@ -842,10 +850,13 @@ export class ComfyApp {
 				if ((e.key === 'c') && (e.metaKey || e.ctrlKey)) {
 					if (e.shiftKey) {
 						this.copyToClipboard(true);
+						e.clipboardData.clearData();
 						block_default = true;
 					}
-					// Trigger default onCopy
-					return true;
+					else {
+						// Trigger onCopy
+						return true;
+					}
 				}
 
 				// Ctrl+V Paste
@@ -855,7 +866,7 @@ export class ComfyApp {
 						block_default = true;
 					}
 					else {
-						// Trigger default onPaste
+						// Trigger onPaste
 						return true;
 					}
 				}

From 10de64af7f1ea22e08e39267ade7ef7f8b1607fe Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 8 Sep 2023 14:02:03 -0400
Subject: [PATCH 25/31] Google doesn't want people to use ComfyUI on colab
 anymore.

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index baa8cf8b6..d83b4bdac 100644
--- a/README.md
+++ b/README.md
@@ -77,9 +77,9 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you
 
 See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor.
 
-## Colab Notebook
+## Jupyter Notebook
 
-To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
+To run it on services like paperspace, kaggle or colab you can use my [Jupyter Notebook](notebooks/comfyui_colab.ipynb)
 
 ## Manual Install (Windows, Linux)

From e85be36bd2c12f335abdf75669b994c535bbb126 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 8 Sep 2023 14:06:58 -0400
Subject: [PATCH 26/31] Add a penultimate_hidden_states to the clip vision
 output.
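
Passing output_hidden_states=True makes the HuggingFace CLIP vision
model return the activations of every transformer layer; index -2 is
the layer just before the final one, which some image-conditioned
models are trained against. A small sketch of the idea using the stock
transformers API (the model name is chosen only for illustration):

    # Sketch: grab the penultimate hidden state from a CLIP vision model.
    import torch
    from transformers import CLIPVisionModel

    model = CLIPVisionModel.from_pretrained("openai/clip-vit-large-patch14")
    pixel_values = torch.zeros((1, 3, 224, 224))  # placeholder image batch
    outputs = model(pixel_values=pixel_values, output_hidden_states=True)

    last = outputs.last_hidden_state        # final layer output
    penultimate = outputs.hidden_states[-2] # one layer earlier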
---
 comfy/clip_vision.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py
index daaa2f2bf..9b95ae003 100644
--- a/comfy/clip_vision.py
+++ b/comfy/clip_vision.py
@@ -49,12 +49,16 @@ class ClipVisionModel():
             precision_scope = lambda a, b: contextlib.nullcontext(a)
 
         with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32):
-            outputs = self.model(pixel_values=pixel_values)
+            outputs = self.model(pixel_values=pixel_values, output_hidden_states=True)
 
         for k in outputs:
             t = outputs[k]
             if t is not None:
-                outputs[k] = t.cpu()
+                if k == 'hidden_states':
+                    outputs["penultimate_hidden_states"] = t[-2].cpu()
+                else:
+                    outputs[k] = t.cpu()
+
         return outputs
 
 def convert_to_transformers(sd, prefix):

From cc2fa311ddf5e085177e219c0fd2d1fd036551db Mon Sep 17 00:00:00 2001
From: Michael Poutre
Date: Fri, 8 Sep 2023 21:11:53 -0700
Subject: [PATCH 27/31] fix(server): Disable access logs

---
 server.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server.py b/server.py
index be33f4100..d04060499 100644
--- a/server.py
+++ b/server.py
@@ -603,7 +603,7 @@ class PromptServer():
             await self.send(*msg)
 
     async def start(self, address, port, verbose=True, call_on_start=None):
-        runner = web.AppRunner(self.app)
+        runner = web.AppRunner(self.app, access_log=None)
         await runner.setup()
         site = web.TCPSite(runner, address, port)
         await site.start()

From 7372255e49b88fc0bb8416faff05ebcd88c81aba Mon Sep 17 00:00:00 2001
From: Chris
Date: Sat, 9 Sep 2023 15:21:38 +1000
Subject: [PATCH 28/31] Specify the precision and rounding based on step

---
 web/scripts/widgets.js | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 68862a68c..8f7537b73 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -2,14 +2,15 @@ import { api } from "./api.js"
 
 function getNumberDefaults(inputData, defaultStep) {
 	let defaultVal = inputData[1]["default"];
-	let { min, max, step, round } = inputData[1];
+	let { min, max, step, round, precision } = inputData[1];
 
 	if (defaultVal == undefined) defaultVal = 0;
 	if (min == undefined) min = 0;
 	if (max == undefined) max = 2048;
 	if (step == undefined) step = defaultStep;
-	if (round == undefined) round = 0.001;
+// precision is the number of decimal places to show.
+// by default, display the smallest number of decimal places such that changes of size step are visible.
+	if (precision == undefined) precision = Math.max(-Math.floor(Math.log10(step)),0)
+// by default, round the value to those decimal places shown.
+	if (round == undefined) round = Math.round(1000000*Math.pow(0.1,precision))/1000000;
 
-	return { val: defaultVal, config: { min, max, step: 10.0 * step, round } };
+	return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } };
 }
 
 export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) {

From 07691e80c3bf9be16c629169e259105ca5327bf0 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 9 Sep 2023 03:15:31 -0400
Subject: [PATCH 29/31] Does it make sense to allow configuring the round and
 precision?
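
The derivation both of these patches rely on: precision is the number
of decimal places needed to make a change of size step visible, and
round snaps values to exactly those decimals. Written out in Python for
clarity (the widget code itself is JavaScript, but the formula is
identical):

    # Worked example of the precision/round derivation from step.
    import math

    def precision_and_round(step):
        precision = max(-math.floor(math.log10(step)), 0)
        # 0.1 ** precision, with float noise trimmed off
        round_to = round(1000000 * (0.1 ** precision)) / 1000000
        return precision, round_to

    print(precision_and_round(0.01))  # (2, 0.01)
    print(precision_and_round(0.5))   # (1, 0.1)
    print(precision_and_round(1))     # (0, 1.0)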
---
 web/scripts/widgets.js | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 4dc173b8f..30caa6a8c 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -2,7 +2,7 @@ import { api } from "./api.js"
 
 function getNumberDefaults(inputData, defaultStep) {
 	let defaultVal = inputData[1]["default"];
-	let { min, max, step, round, precision } = inputData[1];
+	let { min, max, step } = inputData[1];
 
 	if (defaultVal == undefined) defaultVal = 0;
 	if (min == undefined) min = 0;
@@ -10,9 +10,9 @@ function getNumberDefaults(inputData, defaultStep) {
 	if (step == undefined) step = defaultStep;
 // precision is the number of decimal places to show.
 // by default, display the smallest number of decimal places such that changes of size step are visible.
-	if (precision == undefined) precision = Math.max(-Math.floor(Math.log10(step)),0)
+	let precision = Math.max(-Math.floor(Math.log10(step)),0)
 // by default, round the value to those decimal places shown.
-	if (round == undefined) round = Math.round(1000000*Math.pow(0.1,precision))/1000000;
+	let round = Math.round(1000000*Math.pow(0.1,precision))/1000000;
 
 	return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } };
 }

From 7df822212fb2da45c8523155086456c2cd119062 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 10 Sep 2023 02:36:04 -0400
Subject: [PATCH 30/31] Allow checkpoints with .pt and .bin extensions.

---
 folder_paths.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/folder_paths.py b/folder_paths.py
index 82aedd43f..4a10c68e7 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -1,14 +1,13 @@
 import os
 import time
 
-supported_ckpt_extensions = set(['.ckpt', '.pth', '.safetensors'])
 supported_pt_extensions = set(['.ckpt', '.pt', '.bin', '.pth', '.safetensors'])
 
 folder_names_and_paths = {}
 
 base_path = os.path.dirname(os.path.realpath(__file__))
 models_dir = os.path.join(base_path, "models")
-folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_ckpt_extensions)
+folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_pt_extensions)
 folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
 
 folder_names_and_paths["loras"] = ([os.path.join(models_dir, "loras")], supported_pt_extensions)

From 9562a6b49e63e63a16f3e45ff4965f72385f51fa Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 10 Sep 2023 11:19:31 -0400
Subject: [PATCH 31/31] Fix a few clipboard issues.
---
 web/scripts/app.js | 25 ++++++-------------------
 1 file changed, 6 insertions(+), 19 deletions(-)

diff --git a/web/scripts/app.js b/web/scripts/app.js
index 7ef2fc4e3..9db4e9230 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -742,7 +742,7 @@ export class ComfyApp {
 			// copy nodes and clear clipboard
 			if (this.canvas.selected_nodes) {
 				this.canvas.copyToClipboard();
-				e.clipboardData.clearData();
+				e.clipboardData.setData('text', ' '); //clearData doesn't remove images from clipboard
 				e.preventDefault();
 				e.stopImmediatePropagation();
 				return false;
@@ -848,27 +848,14 @@ export class ComfyApp {
 
 				// Ctrl+C Copy
 				if ((e.key === 'c') && (e.metaKey || e.ctrlKey)) {
-					if (e.shiftKey) {
-						this.copyToClipboard(true);
-						e.clipboardData.clearData();
-						block_default = true;
-					}
-					else {
-						// Trigger onCopy
-						return true;
-					}
+					// Trigger onCopy
+					return true;
 				}
 
 				// Ctrl+V Paste
-				if ((e.key === 'v') && (e.metaKey || e.ctrlKey)) {
-					if (e.shiftKey) {
-						this.pasteFromClipboard(true);
-						block_default = true;
-					}
-					else {
-						// Trigger onPaste
-						return true;
-					}
+				if ((e.key === 'v' || e.key == 'V') && (e.metaKey || e.ctrlKey)) {
+					// Trigger onPaste
+					return true;
 				}
 			}