From 967bfce0f3b06bcb7e562b9296960d320e978898 Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Sun, 26 Mar 2023 21:40:36 -0300
Subject: [PATCH 1/7] Optional RETURN_NAMES to set the output name

---
 server.py          | 1 +
 web/scripts/app.js | 6 ++++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/server.py b/server.py
index e71289cdf..80fb2dc72 100644
--- a/server.py
+++ b/server.py
@@ -152,6 +152,7 @@ class PromptServer():
             info = {}
             info['input'] = obj_class.INPUT_TYPES()
             info['output'] = obj_class.RETURN_TYPES
+            info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']
             info['name'] = x #TODO
             info['description'] = ''
             info['category'] = 'sd'
diff --git a/web/scripts/app.js b/web/scripts/app.js
index a743ef63b..e9c869249 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -611,8 +611,10 @@ class ComfyApp {
                 }
             }
 
-            for (const output of nodeData["output"]) {
-                this.addOutput(output, output);
+            for (const o in nodeData["output"]) {
+                const output = nodeData["output"][o];
+                const outputName = nodeData["output_name"][o] || output;
+                this.addOutput(outputName, output);
             }
 
             const s = this.computeSize();
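Background for this patch: a ComfyUI node class declares its output socket types in a RETURN_TYPES tuple, and the change above adds an optional parallel RETURN_NAMES tuple for the labels shown in the UI. A minimal sketch of a node that benefits; the node class, its method name, and its category are hypothetical, while the INPUT_TYPES/RETURN_TYPES/FUNCTION attributes follow ComfyUI's documented node convention:

    # Hypothetical custom node, for illustration only.
    class ImageSizeNode:
        @classmethod
        def INPUT_TYPES(cls):
            return {"required": {"image": ("IMAGE",)}}

        RETURN_TYPES = ("INT", "INT")       # two outputs of the same type...
        RETURN_NAMES = ("width", "height")  # ...now distinguishable in the UI

        FUNCTION = "get_size"
        CATEGORY = "image"

        def get_size(self, image):
            # ComfyUI IMAGE tensors are batched as [batch, height, width, channels]
            return (image.shape[2], image.shape[1])

On the frontend, nodeData["output_name"][o] || output falls back to the type string, so nodes that do not define RETURN_NAMES keep their current labels.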
"#335"; + var pos = group._pos; + var size = group._size; + ctx.globalAlpha = 0.25 * this.editor_alpha; + ctx.beginPath(); + var font_size = + group.font_size || LiteGraph.DEFAULT_GROUP_FONT_SIZE; + ctx.rect(pos[0] + 0.5, pos[1] + 0.5, size[0], font_size * 1.4); + ctx.fill(); + ctx.globalAlpha = this.editor_alpha; + } + + ctx.restore(); + + const res = origDrawGroups.apply(this, arguments); + return res; + } + } + /** * Draws node highlights (executing, drag drop) and progress bar */ @@ -518,6 +608,8 @@ class ComfyApp { canvasEl.tabIndex = "1"; document.body.prepend(canvasEl); + this.#addProcessMouseHandler(); + this.graph = new LGraph(); const canvas = (this.canvas = new LGraphCanvas(canvasEl, this.graph)); this.ctx = canvasEl.getContext("2d"); @@ -561,6 +653,7 @@ class ComfyApp { setInterval(() => localStorage.setItem("workflow", JSON.stringify(this.graph.serialize())), 1000); this.#addDrawNodeHandler(); + this.#addDrawGroupsHandler(); this.#addApiUpdateHandlers(); this.#addDropHandler(); this.#addPasteHandler(); From cf0098d5398cc330ffa8af9a0472ad1b8610515a Mon Sep 17 00:00:00 2001 From: Francesco Yoshi Gobbo Date: Mon, 27 Mar 2023 04:51:18 +0200 Subject: [PATCH 3/7] no lowvram state if cpu only --- comfy/model_management.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 0d5702b91..d9498e29f 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -22,7 +22,8 @@ try: total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) forced_normal_vram = "--normalvram" in sys.argv - if not forced_normal_vram: + forced_cpu = "--cpu" in sys.argv + if not forced_normal_vram and not forced_cpu: if total_vram <= 4096: print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram") set_vram_to = LOW_VRAM From f55755f0d24255a1207a803dc30efe9e4466bfe1 Mon Sep 17 00:00:00 2001 From: Francesco Yoshi Gobbo Date: Mon, 27 Mar 2023 06:48:09 +0200 Subject: [PATCH 4/7] code cleanup --- comfy/model_management.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index d9498e29f..4aa47ff16 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -15,6 +15,8 @@ total_vram_available_mb = -1 import sys import psutil +forced_cpu = "--cpu" in sys.argv + set_vram_to = NORMAL_VRAM try: @@ -22,7 +24,6 @@ try: total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) forced_normal_vram = "--normalvram" in sys.argv - forced_cpu = "--cpu" in sys.argv if not forced_normal_vram and not forced_cpu: if total_vram <= 4096: print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. 
From f55755f0d24255a1207a803dc30efe9e4466bfe1 Mon Sep 17 00:00:00 2001
From: Francesco Yoshi Gobbo
Date: Mon, 27 Mar 2023 06:48:09 +0200
Subject: [PATCH 4/7] code cleanup

---
 comfy/model_management.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index d9498e29f..4aa47ff16 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -15,6 +15,8 @@ total_vram_available_mb = -1
 import sys
 import psutil
 
+forced_cpu = "--cpu" in sys.argv
+
 set_vram_to = NORMAL_VRAM
 
 try:
@@ -22,7 +24,6 @@ try:
     total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024)
     total_ram = psutil.virtual_memory().total / (1024 * 1024)
     forced_normal_vram = "--normalvram" in sys.argv
-    forced_cpu = "--cpu" in sys.argv
     if not forced_normal_vram and not forced_cpu:
         if total_vram <= 4096:
             print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram")
@@ -84,7 +85,7 @@ try:
 except:
     pass
 
-if "--cpu" in sys.argv:
+if forced_cpu:
     vram_state = CPU
 
 print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state])

From bb1503ed63eb49b43a4c1a5abd4df0872cac5d8a Mon Sep 17 00:00:00 2001
From: Silversith
Date: Mon, 27 Mar 2023 07:16:22 +0200
Subject: [PATCH 5/7] Bugfix/include optional node inputs (#271)

* Minor changes and extra nodes
* Added Preview Image for Nodes
* Add a delete images button on main floating menu
* Add a confirmation dialog
* Remove DeleteAll, Remove Custom KSampler, Remove Image List
* Remove Image and Custom Json Load
* Remove Custom Nodes
* remove patch.diff
---
 nodes.py           | 2 +-
 web/scripts/app.js | 5 ++++-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/nodes.py b/nodes.py
index f24bc4a5e..6fb7f0175 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1052,4 +1052,4 @@ def load_custom_nodes():
 
 load_custom_nodes()
 
-load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
+load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
\ No newline at end of file
diff --git a/web/scripts/app.js b/web/scripts/app.js
index a9d3485ca..89eb71122 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -683,7 +683,10 @@ class ComfyApp {
             const nodeData = defs[nodeId];
             const node = Object.assign(
                 function ComfyNode() {
-                    const inputs = nodeData["input"]["required"];
+                    var inputs = nodeData["input"]["required"];
+                    if (nodeData["input"]["optional"] != undefined){
+                        inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"])
+                    }
                     const config = { minWidth: 1, minHeight: 1 };
                     for (const inputName in inputs) {
                         const inputData = inputs[inputName];

From 3444ffff3b892456f94c13365ab498e6971f018d Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 27 Mar 2023 01:56:22 -0400
Subject: [PATCH 6/7] Fix IS_CHANGED not working on nodes with an input from
 another node.

---
 execution.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/execution.py b/execution.py
index 757e0d9f9..aafc86976 100644
--- a/execution.py
+++ b/execution.py
@@ -18,6 +18,8 @@ def get_input_data(inputs, class_def, outputs={}, prompt={}, extra_data={}):
         if isinstance(input_data, list):
             input_unique_id = input_data[0]
             output_index = input_data[1]
+            if input_unique_id not in outputs:
+                return None
             obj = outputs[input_unique_id][output_index]
             input_data_all[x] = obj
         else:
@@ -94,9 +96,10 @@ def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item
     if unique_id in old_prompt and 'is_changed' in old_prompt[unique_id]:
         is_changed_old = old_prompt[unique_id]['is_changed']
     if 'is_changed' not in prompt[unique_id]:
-        input_data_all = get_input_data(inputs, class_def)
-        is_changed = class_def.IS_CHANGED(**input_data_all)
-        prompt[unique_id]['is_changed'] = is_changed
+        input_data_all = get_input_data(inputs, class_def, outputs)
+        if input_data_all is not None:
+            is_changed = class_def.IS_CHANGED(**input_data_all)
+            prompt[unique_id]['is_changed'] = is_changed
     else:
         is_changed = prompt[unique_id]['is_changed']
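The failure mode patch 6 addresses: recursive_output_delete_if_changed evaluates IS_CHANGED before execution, at a point where upstream nodes may not have produced outputs yet, so a link input such as ["4", 0] can reference a missing entry in outputs and raise a KeyError. A simplified sketch of the guarded lookup this patch introduces; the hidden-input handling of the real get_input_data (the prompt and extra_data parameters) is omitted here:

    def get_input_data(inputs, class_def, outputs={}):
        input_data_all = {}
        for x, input_data in inputs.items():
            if isinstance(input_data, list):        # a link: [source_node_id, output_index]
                input_unique_id, output_index = input_data
                if input_unique_id not in outputs:  # upstream node has not executed yet
                    return None                     # caller skips IS_CHANGED for now
                input_data_all[x] = outputs[input_unique_id][output_index]
            else:                                   # a literal widget value
                input_data_all[x] = input_data
        return input_data_all

The caller then treats None as "cannot decide yet" and leaves prompt[unique_id]['is_changed'] unset instead of crashing.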
From bb1223d83fe7fba6bbe80dea22ff8f1044b709d9 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 27 Mar 2023 02:16:58 -0400
Subject: [PATCH 7/7] Fix errors appearing more than once.

---
 execution.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/execution.py b/execution.py
index aafc86976..3ca551db6 100644
--- a/execution.py
+++ b/execution.py
@@ -281,7 +281,7 @@ def validate_prompt(prompt):
             errors += [(o, reason)]
 
     if len(good_outputs) == 0:
-        errors_list = "\n".join(map(lambda a: "{}".format(a[1]), errors))
+        errors_list = "\n".join(set(map(lambda a: "{}".format(a[1]), errors)))
         return (False, "Prompt has no properly connected outputs\n {}".format(errors_list))
 
     return (True, "")
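A note on the set() fix: it collapses duplicate error strings, but Python sets do not preserve insertion order, so the surviving messages may print in arbitrary order. Where first-seen order matters, dict.fromkeys gives the same deduplication while keeping it. The sample data below is hypothetical:

    errors = [("9", "missing input"), ("12", "missing input"), ("15", "bad type")]

    # The patch's approach: duplicates collapse, order is arbitrary.
    errors_list = "\n".join(set(map(lambda a: "{}".format(a[1]), errors)))

    # Order-preserving alternative with the same deduplication.
    errors_list = "\n".join(dict.fromkeys(a[1] for a in errors))

For a short error summary either is fine; the set() version matches the surrounding lambda style.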