From 0300e7a6781c4f30b5ab7320a3ea6bc7cbaab4fd Mon Sep 17 00:00:00 2001 From: m957ymj75urz Date: Thu, 9 Mar 2023 18:02:03 +0100 Subject: [PATCH 01/49] add batch count to the menu --- web/scripts/app.js | 40 +++++++++++++++++++++------------------- web/scripts/ui.js | 9 +++++++-- 2 files changed, 28 insertions(+), 21 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 1cf81b8f9..8dd76f398 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -641,31 +641,33 @@ class ComfyApp { return { workflow, output }; } - async queuePrompt(number) { - const p = await this.graphToPrompt(); + async queuePrompt(number, batchCount = 1) { + for (let i = 0; i < batchCount; i++) { + const p = await this.graphToPrompt(); - try { - await api.queuePrompt(number, p); - } catch (error) { - this.ui.dialog.show(error.response || error.toString()); - return; - } + try { + await api.queuePrompt(number, p); + } catch (error) { + this.ui.dialog.show(error.response || error.toString()); + return; + } - for (const n of p.workflow.nodes) { - const node = graph.getNodeById(n.id); - if (node.widgets) { - for (const widget of node.widgets) { - // Allow widgets to run callbacks after a prompt has been queued - // e.g. random seed after every gen - if (widget.afterQueued) { - widget.afterQueued(); + for (const n of p.workflow.nodes) { + const node = graph.getNodeById(n.id); + if (node.widgets) { + for (const widget of node.widgets) { + // Allow widgets to run callbacks after a prompt has been queued + // e.g. random seed after every gen + if (widget.afterQueued) { + widget.afterQueued(); + } } } } - } - this.canvas.draw(true, true); - await this.ui.queue.update(); + this.canvas.draw(true, true); + await this.ui.queue.update(); + } } /** diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 2c5a75f39..f839fef29 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -254,9 +254,14 @@ export class ComfyUI { $el("span", { $: (q) => (this.queueSize = q) }), $el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }), ]), - $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0) }), + $el("div", { style: { width: "100%" }}, [ + $el("label", { innerHTML: "Batch count" }, [ + $el("input", { type: "number", value: "1", min: "1", style: { width: "30%", "margin-left": "0.4em" }, onchange: (i) => this.batchCount = i.target.value }) + ]), + ]), + $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0, this.batchCount) }), $el("div.comfy-menu-btns", [ - $el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1) }), + $el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }), $el("button", { $: (b) => (this.queue.button = b), textContent: "View Queue", From 114e8ca4ed06c2c28b40a51cce9a0fcd73e98ed5 Mon Sep 17 00:00:00 2001 From: m957ymj75urz Date: Fri, 10 Mar 2023 10:38:35 +0100 Subject: [PATCH 02/49] added a range input --- web/scripts/ui.js | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index f839fef29..1ba95bb40 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -231,6 +231,7 @@ export class ComfyUI { this.dialog = new ComfyDialog(); this.settings = new ComfySettingsDialog(); + this.batchCount = 1; this.queue = new ComfyList("Queue"); this.history = new ComfyList("History"); @@ -256,7 +257,20 @@ export class ComfyUI { ]), $el("div", { style: { width: "100%" }}, [ 
$el("label", { innerHTML: "Batch count" }, [ - $el("input", { type: "number", value: "1", min: "1", style: { width: "30%", "margin-left": "0.4em" }, onchange: (i) => this.batchCount = i.target.value }) + $el("input", { id: "batchCountInputNumber", type: "number", value: this.batchCount, min: "1", style: { width: "35%", "margin-left": "0.4em" }, + oninput: (i) => { + this.batchCount = i.target.value; + document.getElementById('batchCountInputRange').value = this.batchCount; + console.log("number"); + } + }), + $el("input", { id: "batchCountInputRange", type: "range", min: "1", max: "100", value: this.batchCount, + oninput: (i) => { + this.batchCount = i.srcElement.value; + document.getElementById('batchCountInputNumber').value = i.srcElement.value; + console.log("range"); + } + }), ]), ]), $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0, this.batchCount) }), From 7c95e1a03bb6a9a5ed8814d1c571433eb45ac4e3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Mar 2023 15:44:16 -0400 Subject: [PATCH 03/49] Xformers is now properly disabled when --cpu used. Added --windows-standalone-build option, currently it only opens makes the code open up comfyui in the browser. --- comfy/ldm/modules/attention.py | 5 ++--- comfy/ldm/modules/diffusionmodules/model.py | 6 ++---- comfy/model_management.py | 15 +++++++++++++++ main.py | 15 +++++++++++---- server.py | 5 ++++- 5 files changed, 34 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 692952f32..a6d40e890 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -14,9 +14,8 @@ import model_management try: import xformers import xformers.ops - XFORMERS_IS_AVAILBLE = True except: - XFORMERS_IS_AVAILBLE = False + pass # CrossAttn precision handling import os @@ -481,7 +480,7 @@ class CrossAttentionPytorch(nn.Module): return self.to_out(out) import sys -if XFORMERS_IS_AVAILBLE == False or "--disable-xformers" in sys.argv: +if model_management.xformers_enabled() == False: if "--use-split-cross-attention" in sys.argv: print("Using split optimization for cross attention") CrossAttention = CrossAttentionDoggettx diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 18f7a8b08..15f35b914 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -12,10 +12,8 @@ import model_management try: import xformers import xformers.ops - XFORMERS_IS_AVAILBLE = True except: - XFORMERS_IS_AVAILBLE = False - print("No module 'xformers'. 
Proceeding without it.") + pass try: OOM_EXCEPTION = torch.cuda.OutOfMemoryError @@ -315,7 +313,7 @@ class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' - if XFORMERS_IS_AVAILBLE and attn_type == "vanilla": + if model_management.xformers_enabled() and attn_type == "vanilla": attn_type = "vanilla-xformers" print(f"making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": diff --git a/comfy/model_management.py b/comfy/model_management.py index 4b061c32d..c1a8f5a2f 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -31,6 +31,16 @@ try: except: pass +try: + import xformers + import xformers.ops + XFORMERS_IS_AVAILBLE = True +except: + XFORMERS_IS_AVAILBLE = False + +if "--disable-xformers" in sys.argv: + XFORMERS_IS_AVAILBLE = False + if "--cpu" in sys.argv: vram_state = CPU if "--lowvram" in sys.argv: @@ -159,6 +169,11 @@ def get_autocast_device(dev): return dev.type return "cuda" +def xformers_enabled(): + if vram_state == CPU: + return False + return XFORMERS_IS_AVAILBLE + def get_free_memory(dev=None, torch_free_too=False): if dev is None: dev = get_torch_device() diff --git a/main.py b/main.py index ca8674b55..c3d96039b 100644 --- a/main.py +++ b/main.py @@ -38,8 +38,8 @@ def prompt_worker(q, server): e.execute(item[-2], item[-1]) q.task_done(item_id, e.outputs) -async def run(server, address='', port=8188, verbose=True): - await asyncio.gather(server.start(address, port, verbose), server.publish_loop()) +async def run(server, address='', port=8188, verbose=True, call_on_start=None): + await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop()) def hijack_progress(server): from tqdm.auto import tqdm @@ -76,11 +76,18 @@ if __name__ == "__main__": except: pass + call_on_start = None + if "--windows-standalone-build" in sys.argv: + def startup_server(address, port): + import webbrowser + webbrowser.open("http://{}:{}".format(address, port)) + call_on_start = startup_server + if os.name == "nt": try: - loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print)) + loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start)) except KeyboardInterrupt: pass else: - loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print)) + loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start)) diff --git a/server.py b/server.py index 5aba57619..a29d85974 100644 --- a/server.py +++ b/server.py @@ -260,7 +260,7 @@ class PromptServer(): msg = await self.messages.get() await self.send(*msg) - async def start(self, address, port, verbose=True): + async def start(self, address, port, verbose=True, call_on_start=None): runner = web.AppRunner(self.app) await runner.setup() site = web.TCPSite(runner, address, port) @@ -271,3 +271,6 @@ class PromptServer(): if verbose: print("Starting server\n") print("To see the GUI go to: http://{}:{}".format(address, port)) + if call_on_start is not None: + call_on_start(address, port) + From a01ea06d82c3eb619d3d0f7622aae8045c9d9aff Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 01:34:09 -0400 Subject: [PATCH 04/49] Add an automatic standalone windows release for 
nvidia and cpu. --- .ci/setup_windows_zip.ps1 | 25 +++++++ .ci/update_windows/update.py | 65 +++++++++++++++++++ .ci/update_windows/update_all.bat | 3 + .ci/update_windows/update_comfyui_only.bat | 2 + .../README_VERY_IMPORTANT.txt | 22 +++++++ .ci/windows_base_files/run_cpu.bat | 2 + .ci/windows_base_files/run_nvidia_gpu.bat | 2 + .github/workflows/windows_release.yml | 33 ++++++++++ 8 files changed, 154 insertions(+) create mode 100755 .ci/setup_windows_zip.ps1 create mode 100755 .ci/update_windows/update.py create mode 100755 .ci/update_windows/update_all.bat create mode 100755 .ci/update_windows/update_comfyui_only.bat create mode 100755 .ci/windows_base_files/README_VERY_IMPORTANT.txt create mode 100755 .ci/windows_base_files/run_cpu.bat create mode 100755 .ci/windows_base_files/run_nvidia_gpu.bat create mode 100644 .github/workflows/windows_release.yml diff --git a/.ci/setup_windows_zip.ps1 b/.ci/setup_windows_zip.ps1 new file mode 100755 index 000000000..4bd2f0b4a --- /dev/null +++ b/.ci/setup_windows_zip.ps1 @@ -0,0 +1,25 @@ +Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip +Expand-Archive python_embeded.zip +cd python_embeded +Add-Content -Path .\python310._pth -Value 'import site' +Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py +.\python.exe get-pip.py +.\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 +"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth +cd .. + + +mkdir ComfyUI_windows_portable +mv python_embeded ComfyUI_windows_portable +mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI + +cd ComfyUI_windows_portable + +mkdir update +cp ComfyUI/.ci/update_windows/* ./update/ +cp ComfyUI/.ci/windows_base_files/* ./ + +cd .. 
+ +& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable +mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z diff --git a/.ci/update_windows/update.py b/.ci/update_windows/update.py new file mode 100755 index 000000000..c09f29a80 --- /dev/null +++ b/.ci/update_windows/update.py @@ -0,0 +1,65 @@ +import pygit2 +from datetime import datetime +import sys + +def pull(repo, remote_name='origin', branch='master'): + for remote in repo.remotes: + if remote.name == remote_name: + remote.fetch() + remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target + merge_result, _ = repo.merge_analysis(remote_master_id) + # Up to date, do nothing + if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE: + return + # We can just fastforward + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD: + repo.checkout_tree(repo.get(remote_master_id)) + try: + master_ref = repo.lookup_reference('refs/heads/%s' % (branch)) + master_ref.set_target(remote_master_id) + except KeyError: + repo.create_branch(branch, repo.get(remote_master_id)) + repo.head.set_target(remote_master_id) + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL: + repo.merge(remote_master_id) + + if repo.index.conflicts is not None: + for conflict in repo.index.conflicts: + print('Conflicts found in:', conflict[0].path) + raise AssertionError('Conflicts, ahhhhh!!') + + user = repo.default_signature + tree = repo.index.write_tree() + commit = repo.create_commit('HEAD', + user, + user, + 'Merge!', + tree, + [repo.head.target, remote_master_id]) + # We need to do this or git CLI will think we are still merging. + repo.state_cleanup() + else: + raise AssertionError('Unknown merge analysis result') + + +repo = pygit2.Repository(str(sys.argv[1])) +ident = pygit2.Signature('comfyui', 'comfy@ui') +try: + print("stashing current changes") + repo.stash(ident) +except KeyError: + print("nothing to stash") +backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S')) +print("creating backup branch: {}".format(backup_branch_name)) +repo.branches.local.create(backup_branch_name, repo.head.peel()) + +print("checking out master branch") +branch = repo.lookup_branch('master') +ref = repo.lookup_reference(branch.name) +repo.checkout(ref) + +print("pulling latest changes") +pull(repo) + +print("Done!") + diff --git a/.ci/update_windows/update_all.bat b/.ci/update_windows/update_all.bat new file mode 100755 index 000000000..b7308550d --- /dev/null +++ b/.ci/update_windows/update_all.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/update_windows/update_comfyui_only.bat b/.ci/update_windows/update_comfyui_only.bat new file mode 100755 index 000000000..60d1e694f --- /dev/null +++ b/.ci/update_windows/update_comfyui_only.bat @@ -0,0 +1,2 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +pause diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt new file mode 100755 index 000000000..3c73a27ac --- /dev/null +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -0,0 +1,22 @@ +HOW TO RUN: + +if you have a NVIDIA gpu: + +run_nvidia_gpu.bat + + + +To run it in slow CPU mode: + +run_cpu.bat + + + 
+IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints + +You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt + + + +To update only the ComfyUI code: update\update_comfyui_only.bat +To update ComfyUI with the python dependencies: update\update_all.bat diff --git a/.ci/windows_base_files/run_cpu.bat b/.ci/windows_base_files/run_cpu.bat new file mode 100755 index 000000000..c3ba41721 --- /dev/null +++ b/.ci/windows_base_files/run_cpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build +pause diff --git a/.ci/windows_base_files/run_nvidia_gpu.bat b/.ci/windows_base_files/run_nvidia_gpu.bat new file mode 100755 index 000000000..274d7c948 --- /dev/null +++ b/.ci/windows_base_files/run_nvidia_gpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build +pause diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml new file mode 100644 index 000000000..bc94a079c --- /dev/null +++ b/.github/workflows/windows_release.yml @@ -0,0 +1,33 @@ +name: "Windows Release" + +on: + push: + branches: + - master + +jobs: + build: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - run: | + cd .. + cp ComfyUI/.ci/setup_windows_zip.ps1 ./ + cp -r ComfyUI ComfyUI_copy + .\setup_windows_zip.ps1 + ls + + - uses: "marvinpinto/action-automatic-releases@latest" + with: + repo_token: "${{ secrets.GITHUB_TOKEN }}" + automatic_release_tag: "latest" + prerelease: true + title: "ComfyUI Standalone Portable Windows Build (For NVIDIA or CPU only)" + files: ComfyUI_windows_portable_nvidia_or_cpu.7z From 09bcc6b0b4bd15d4bd0d440c889d7457b9065bf3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 02:54:30 -0400 Subject: [PATCH 05/49] Probably safer to manually trigger builds instead of every push. --- .github/workflows/windows_release.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index bc94a079c..a6cb883b7 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -1,9 +1,10 @@ name: "Windows Release" on: - push: - branches: - - master + workflow_dispatch: +# push: +# branches: +# - master jobs: build: From ec31178790177346b89f0e6ef1a0799a131e59bb Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:42:02 -0400 Subject: [PATCH 06/49] Add download link of portable build to README. --- README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 75d75ada2..5870f8523 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,16 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git # Installing +### Windows + +There is a portable standalone build for Windows that should work on for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). + +[Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) + +Just download, extract and run. 
Make sure you put a checkpoint/model file in ComfyUI/models/checkpoints + +## Manual Install (Windows, Linux) + Git clone this repo. Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints @@ -39,7 +49,7 @@ Put your VAE in: models/vae At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10. -### AMD +### AMD (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed: ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2``` From 778b72d1b2c4ea118a228850dfbc464e65317ba1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:43:07 -0400 Subject: [PATCH 07/49] Typo. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5870f8523..c34fdf698 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git ### Windows -There is a portable standalone build for Windows that should work on for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). +There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) From ce95885989496df43b304fe7f71cdef56bc34099 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:44:29 -0400 Subject: [PATCH 08/49] Readme style adjustments. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c34fdf698..7366fa259 100644 --- a/README.md +++ b/README.md @@ -31,11 +31,11 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git # Installing -### Windows +## Windows There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). -[Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) +### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) Just download, extract and run. Make sure you put a checkpoint/model file in ComfyUI/models/checkpoints From 54ecbb0552cbc4be4a1f137a7d385929f8c59fd7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 10:47:12 -0400 Subject: [PATCH 09/49] Readme improvement. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7366fa259..b49d02562 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ There is a portable standalone build for Windows that should work for running on ### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z) -Just download, extract and run. Make sure you put a checkpoint/model file in ComfyUI/models/checkpoints +Just download, extract and run. 
Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints ## Manual Install (Windows, Linux) From 72b42ab260a462bdfce30a7d961b13d0e6207fe7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 11:36:48 -0400 Subject: [PATCH 10/49] --disable-xformers should not even try to import xformers. --- comfy/ldm/modules/attention.py | 5 ++--- comfy/ldm/modules/diffusionmodules/model.py | 4 +--- comfy/model_management.py | 15 ++++++++------- main.py | 7 ++++--- 4 files changed, 15 insertions(+), 16 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index a6d40e890..f78a1a6cf 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -11,11 +11,10 @@ from .sub_quadratic_attention import efficient_dot_product_attention import model_management -try: + +if model_management.xformers_enabled(): import xformers import xformers.ops -except: - pass # CrossAttn precision handling import os diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 15f35b914..fcbee29ff 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -9,11 +9,9 @@ from typing import Optional, Any from ldm.modules.attention import MemoryEfficientCrossAttention import model_management -try: +if model_management.xformers_enabled(): import xformers import xformers.ops -except: - pass try: OOM_EXCEPTION = torch.cuda.OutOfMemoryError diff --git a/comfy/model_management.py b/comfy/model_management.py index c1a8f5a2f..7365beefe 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -31,15 +31,16 @@ try: except: pass -try: - import xformers - import xformers.ops - XFORMERS_IS_AVAILBLE = True -except: - XFORMERS_IS_AVAILBLE = False - if "--disable-xformers" in sys.argv: XFORMERS_IS_AVAILBLE = False +else: + try: + import xformers + import xformers.ops + XFORMERS_IS_AVAILBLE = True + except: + XFORMERS_IS_AVAILBLE = False + if "--cpu" in sys.argv: vram_state = CPU diff --git a/main.py b/main.py index c3d96039b..fc37781cd 100644 --- a/main.py +++ b/main.py @@ -8,9 +8,6 @@ if os.name == "nt": import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) -import execution -import server - if __name__ == "__main__": if '--help' in sys.argv: print("Valid Command line Arguments:") @@ -18,6 +15,7 @@ if __name__ == "__main__": print("\t--port 8188\t\t\tSet the listen port.") print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n") print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.") + print("\t--disable-xformers\t\tdisables xformers") print() print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n") print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.") @@ -31,6 +29,9 @@ if __name__ == "__main__": print("disabling upcasting of attention") os.environ['ATTN_PRECISION'] = "fp16" +import execution +import server + def prompt_worker(q, server): e = execution.PromptExecutor(server) while True: From 1de721b33c5bf826a0213ef7493ba1af968b81ec Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 
12:25:19 -0400 Subject: [PATCH 11/49] Add pytorch attention support to VAE. --- comfy/ldm/modules/attention.py | 22 +++----- comfy/ldm/modules/diffusionmodules/model.py | 62 +++++++++++++++++++++ comfy/model_management.py | 11 ++++ main.py | 1 + 4 files changed, 83 insertions(+), 13 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index f78a1a6cf..e97badd04 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -479,23 +479,19 @@ class CrossAttentionPytorch(nn.Module): return self.to_out(out) import sys -if model_management.xformers_enabled() == False: +if model_management.xformers_enabled(): + print("Using xformers cross attention") + CrossAttention = MemoryEfficientCrossAttention +elif model_management.pytorch_attention_enabled(): + print("Using pytorch cross attention") + CrossAttention = CrossAttentionPytorch +else: if "--use-split-cross-attention" in sys.argv: print("Using split optimization for cross attention") CrossAttention = CrossAttentionDoggettx else: - if "--use-pytorch-cross-attention" in sys.argv: - print("Using pytorch cross attention") - torch.backends.cuda.enable_math_sdp(False) - torch.backends.cuda.enable_flash_sdp(True) - torch.backends.cuda.enable_mem_efficient_sdp(True) - CrossAttention = CrossAttentionPytorch - else: - print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") - CrossAttention = CrossAttentionBirchSan -else: - print("Using xformers cross attention") - CrossAttention = MemoryEfficientCrossAttention + print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") + CrossAttention = CrossAttentionBirchSan class BasicTransformerBlock(nn.Module): diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index fcbee29ff..129b86a7f 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -299,6 +299,64 @@ class MemoryEfficientAttnBlock(nn.Module): out = self.proj_out(out) return x+out +class MemoryEfficientAttnBlockPytorch(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.attention_op: Optional[Any] = None + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + B, C, H, W = q.shape + q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v)) + + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(B, t.shape[1], 1, C) + .permute(0, 2, 1, 3) + .reshape(B * 1, t.shape[1], C) + .contiguous(), + (q, k, v), + ) + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) + + out = ( + out.unsqueeze(0) + .reshape(B, 1, out.shape[1], C) + .permute(0, 2, 1, 3) + .reshape(B, out.shape[1], C) + ) + out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C) + out = self.proj_out(out) + return x+out class 
MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): def forward(self, x, context=None, mask=None): @@ -313,6 +371,8 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' if model_management.xformers_enabled() and attn_type == "vanilla": attn_type = "vanilla-xformers" + if model_management.pytorch_attention_enabled() and attn_type == "vanilla": + attn_type = "vanilla-pytorch" print(f"making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": assert attn_kwargs is None @@ -320,6 +380,8 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): elif attn_type == "vanilla-xformers": print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...") return MemoryEfficientAttnBlock(in_channels) + elif attn_type == "vanilla-pytorch": + return MemoryEfficientAttnBlockPytorch(in_channels) elif type == "memory-efficient-cross-attn": attn_kwargs["query_dim"] = in_channels return MemoryEfficientCrossAttentionWrapper(**attn_kwargs) diff --git a/comfy/model_management.py b/comfy/model_management.py index 7365beefe..482b1add9 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -41,6 +41,14 @@ else: except: XFORMERS_IS_AVAILBLE = False +ENABLE_PYTORCH_ATTENTION = False +if "--use-pytorch-cross-attention" in sys.argv: + torch.backends.cuda.enable_math_sdp(True) + torch.backends.cuda.enable_flash_sdp(True) + torch.backends.cuda.enable_mem_efficient_sdp(True) + ENABLE_PYTORCH_ATTENTION = True + XFORMERS_IS_AVAILBLE = False + if "--cpu" in sys.argv: vram_state = CPU @@ -175,6 +183,9 @@ def xformers_enabled(): return False return XFORMERS_IS_AVAILBLE +def pytorch_attention_enabled(): + return ENABLE_PYTORCH_ATTENTION + def get_free_memory(dev=None, torch_free_too=False): if dev is None: dev = get_torch_device() diff --git a/main.py b/main.py index fc37781cd..b2b3f1c40 100644 --- a/main.py +++ b/main.py @@ -15,6 +15,7 @@ if __name__ == "__main__": print("\t--port 8188\t\t\tSet the listen port.") print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n") print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.") + print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.") print("\t--disable-xformers\t\tdisables xformers") print() print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n") From 65380c0068de14d05f3a6a19d964560d587f5c58 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 12:47:12 -0400 Subject: [PATCH 12/49] Add github workflow for a build with pytorch nightly. 
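
Compared to the stable package, the nightly base files below also pass --use-pytorch-cross-attention in run_nvidia_gpu.bat, since the nightly cu118 wheels ship the PyTorch 2.x fused attention kernels. As a rough sketch (simplified, not the exact comfy/ldm/modules/attention.py code), that flag routes cross attention through the built-in kernel:

    import torch

    def sdp_attention(q, k, v):
        # q, k, v: query/key/value tensors with matching batch/head dims,
        # last two dims (tokens, dim_head). Dispatches to flash or
        # memory-efficient kernels when the hardware supports them.
        return torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
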
--- .ci/nightly/update_windows/update.py | 65 +++++++++++++++++++ .ci/nightly/update_windows/update_all.bat | 3 + .../update_windows/update_comfyui_only.bat | 2 + .../README_VERY_IMPORTANT.txt | 22 +++++++ .ci/nightly/windows_base_files/run_cpu.bat | 2 + .../windows_base_files/run_nvidia_gpu.bat | 2 + .ci/setup_windows_zip_nightly_pytorch.ps1 | 26 ++++++++ .../windows_release_nightly_pytorch.yml | 34 ++++++++++ 8 files changed, 156 insertions(+) create mode 100755 .ci/nightly/update_windows/update.py create mode 100755 .ci/nightly/update_windows/update_all.bat create mode 100755 .ci/nightly/update_windows/update_comfyui_only.bat create mode 100755 .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt create mode 100755 .ci/nightly/windows_base_files/run_cpu.bat create mode 100755 .ci/nightly/windows_base_files/run_nvidia_gpu.bat create mode 100755 .ci/setup_windows_zip_nightly_pytorch.ps1 create mode 100644 .github/workflows/windows_release_nightly_pytorch.yml diff --git a/.ci/nightly/update_windows/update.py b/.ci/nightly/update_windows/update.py new file mode 100755 index 000000000..c09f29a80 --- /dev/null +++ b/.ci/nightly/update_windows/update.py @@ -0,0 +1,65 @@ +import pygit2 +from datetime import datetime +import sys + +def pull(repo, remote_name='origin', branch='master'): + for remote in repo.remotes: + if remote.name == remote_name: + remote.fetch() + remote_master_id = repo.lookup_reference('refs/remotes/origin/%s' % (branch)).target + merge_result, _ = repo.merge_analysis(remote_master_id) + # Up to date, do nothing + if merge_result & pygit2.GIT_MERGE_ANALYSIS_UP_TO_DATE: + return + # We can just fastforward + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_FASTFORWARD: + repo.checkout_tree(repo.get(remote_master_id)) + try: + master_ref = repo.lookup_reference('refs/heads/%s' % (branch)) + master_ref.set_target(remote_master_id) + except KeyError: + repo.create_branch(branch, repo.get(remote_master_id)) + repo.head.set_target(remote_master_id) + elif merge_result & pygit2.GIT_MERGE_ANALYSIS_NORMAL: + repo.merge(remote_master_id) + + if repo.index.conflicts is not None: + for conflict in repo.index.conflicts: + print('Conflicts found in:', conflict[0].path) + raise AssertionError('Conflicts, ahhhhh!!') + + user = repo.default_signature + tree = repo.index.write_tree() + commit = repo.create_commit('HEAD', + user, + user, + 'Merge!', + tree, + [repo.head.target, remote_master_id]) + # We need to do this or git CLI will think we are still merging. 
+ repo.state_cleanup() + else: + raise AssertionError('Unknown merge analysis result') + + +repo = pygit2.Repository(str(sys.argv[1])) +ident = pygit2.Signature('comfyui', 'comfy@ui') +try: + print("stashing current changes") + repo.stash(ident) +except KeyError: + print("nothing to stash") +backup_branch_name = 'backup_branch_{}'.format(datetime.today().strftime('%Y-%m-%d_%H_%M_%S')) +print("creating backup branch: {}".format(backup_branch_name)) +repo.branches.local.create(backup_branch_name, repo.head.peel()) + +print("checking out master branch") +branch = repo.lookup_branch('master') +ref = repo.lookup_reference(branch.name) +repo.checkout(ref) + +print("pulling latest changes") +pull(repo) + +print("Done!") + diff --git a/.ci/nightly/update_windows/update_all.bat b/.ci/nightly/update_windows/update_all.bat new file mode 100755 index 000000000..c5e0c6be7 --- /dev/null +++ b/.ci/nightly/update_windows/update_all.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/nightly/update_windows/update_comfyui_only.bat b/.ci/nightly/update_windows/update_comfyui_only.bat new file mode 100755 index 000000000..60d1e694f --- /dev/null +++ b/.ci/nightly/update_windows/update_comfyui_only.bat @@ -0,0 +1,2 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +pause diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt new file mode 100755 index 000000000..3c73a27ac --- /dev/null +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -0,0 +1,22 @@ +HOW TO RUN: + +if you have a NVIDIA gpu: + +run_nvidia_gpu.bat + + + +To run it in slow CPU mode: + +run_cpu.bat + + + +IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints + +You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt + + + +To update only the ComfyUI code: update\update_comfyui_only.bat +To update ComfyUI with the python dependencies: update\update_all.bat diff --git a/.ci/nightly/windows_base_files/run_cpu.bat b/.ci/nightly/windows_base_files/run_cpu.bat new file mode 100755 index 000000000..c3ba41721 --- /dev/null +++ b/.ci/nightly/windows_base_files/run_cpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --cpu --windows-standalone-build +pause diff --git a/.ci/nightly/windows_base_files/run_nvidia_gpu.bat b/.ci/nightly/windows_base_files/run_nvidia_gpu.bat new file mode 100755 index 000000000..8ee2f3402 --- /dev/null +++ b/.ci/nightly/windows_base_files/run_nvidia_gpu.bat @@ -0,0 +1,2 @@ +.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --use-pytorch-cross-attention +pause diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 new file mode 100755 index 000000000..edc3024ab --- /dev/null +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -0,0 +1,26 @@ +Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip +Expand-Archive python_embeded.zip +rm python_embeded.zip +cd python_embeded +Add-Content -Path .\python310._pth -Value 'import site' +Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py +.\python.exe get-pip.py 
+.\python.exe -s -m pip install torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 +"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth +cd .. + + +mkdir ComfyUI_windows_portable +mv python_embeded ComfyUI_windows_portable_nightly_pytorch +mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI + +cd ComfyUI_windows_portable_nightly_pytorch + +mkdir update +cp ComfyUI/.ci/nightly/update_windows/* ./update/ +cp ComfyUI/.ci/nightly/windows_base_files/* ./ + +cd .. + +& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch +mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml new file mode 100644 index 000000000..f958a4ab7 --- /dev/null +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -0,0 +1,34 @@ +name: "Windows Release Nightly pytorch" + +on: + workflow_dispatch: +# push: +# branches: +# - master + +jobs: + build: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - run: | + cd .. + cp ComfyUI/.ci/setup_windows_zip_nightly_pytorch.ps1 ./ + cp -r ComfyUI ComfyUI_copy + .\setup_windows_zip_nightly_pytorch.ps1 + ls + + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z + tag: "latest" + overwrite: true From b955ef069dac1aa5ccae3c12d76b717588069e60 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 13:50:54 -0400 Subject: [PATCH 13/49] Add command to install unstable pytorch builds for ROCm. --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b49d02562..5c80b10a2 100644 --- a/README.md +++ b/README.md @@ -50,10 +50,16 @@ Put your VAE in: models/vae At the time of writing this pytorch has issues with python versions higher than 3.10 so make sure your python/pip versions are 3.10. ### AMD (Linux only) -AMD users can install rocm and pytorch with pip if you don't have it already installed: +AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2``` + +I highly recommend you use the nightly/unstable pytorch builds though because they work a lot better for me (run this in the ComfyUI folder so it picks up the requirements.txt): + +```pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/rocm5.4.2 -r requirements.txt``` + + ### NVIDIA Nvidia users should install torch using this command: From 5be28c4069b231670e467311efbc48514e719126 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 14:49:18 -0400 Subject: [PATCH 14/49] Remove omegaconf dependency and some ci changes. 
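
The gist of the dependency swap, as a minimal sketch (illustrative helper, not the exact comfy/sd.py code): plain PyYAML returns ordinary dicts and lists, so OmegaConf-style attribute access like config.model becomes plain key lookups:

    import yaml

    def load_model_config(config_path):
        # Parse the checkpoint's YAML config with PyYAML instead of OmegaConf.
        with open(config_path, 'r') as stream:
            config = yaml.safe_load(stream)  # plain dict/list structures
        model_params = config["model"]["params"]
        return {
            "scale_factor": model_params["scale_factor"],
            "clip_config": model_params["cond_stage_config"],
            "vae_config": model_params["first_stage_config"],
        }
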
--- .ci/setup_windows_zip_nightly_pytorch.ps1 | 5 +++-- .github/workflows/windows_release.yml | 13 +++++++------ .../workflows/windows_release_nightly_pytorch.yml | 4 +++- comfy/cldm/cldm.py | 6 +++--- comfy/ldm/models/diffusion/ddpm.py | 5 ++--- comfy/ldm/modules/diffusionmodules/openaimodel.py | 6 +++--- comfy/sd.py | 7 ++++--- requirements.txt | 2 +- 8 files changed, 26 insertions(+), 22 deletions(-) diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 index edc3024ab..6d13bad93 100755 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -5,11 +5,12 @@ cd python_embeded Add-Content -Path .\python310._pth -Value 'import site' Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py .\python.exe get-pip.py -.\python.exe -s -m pip install torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 +python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir +ls ../temp_wheel_dir +.\python.exe -s -m pip install --pre (get-item ..\temp_wheel_dir\*) "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth cd .. - mkdir ComfyUI_windows_portable mv python_embeded ComfyUI_windows_portable_nightly_pytorch mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index a6cb883b7..be48b7eae 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -25,10 +25,11 @@ jobs: .\setup_windows_zip.ps1 ls - - uses: "marvinpinto/action-automatic-releases@latest" + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 with: - repo_token: "${{ secrets.GITHUB_TOKEN }}" - automatic_release_tag: "latest" - prerelease: true - title: "ComfyUI Standalone Portable Windows Build (For NVIDIA or CPU only)" - files: ComfyUI_windows_portable_nvidia_or_cpu.7z + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: ComfyUI_windows_portable_nvidia_or_cpu.7z + tag: "latest" + overwrite: true + diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index f958a4ab7..1aeaef45a 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -17,7 +17,9 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - + - uses: actions/setup-python@v4 + with: + python-version: '3.10.9' - run: | cd .. cp ComfyUI/.ci/setup_windows_zip_nightly_pytorch.ps1 ./ diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 8d14a6907..c60abf80b 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -59,9 +59,9 @@ class ControlNet(nn.Module): if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) + # from omegaconf.listconfig import ListConfig + # if type(context_dim) == ListConfig: + # context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads diff --git a/comfy/ldm/models/diffusion/ddpm.py b/comfy/ldm/models/diffusion/ddpm.py index 802034c72..42ed2add7 100644 --- a/comfy/ldm/models/diffusion/ddpm.py +++ b/comfy/ldm/models/diffusion/ddpm.py @@ -18,7 +18,6 @@ import itertools from tqdm import tqdm from torchvision.utils import make_grid # from pytorch_lightning.utilities.distributed import rank_zero_only -from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma @@ -1124,8 +1123,8 @@ class LatentDiffusion(DDPM): def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label - if isinstance(xc, ListConfig): - xc = list(xc) + # if isinstance(xc, ListConfig): + # xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 9a652c295..09ab1a066 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -477,9 +477,9 @@ class UNetModel(nn.Module): if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' - from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) + # from omegaconf.listconfig import ListConfig + # if type(context_dim) == ListConfig: + # context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads diff --git a/comfy/sd.py b/comfy/sd.py index fd434ba6a..3f5ce24e9 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -6,7 +6,7 @@ import sd2_clip import model_management from .ldm.util import instantiate_from_config from .ldm.models.autoencoder import AutoencoderKL -from omegaconf import OmegaConf +import yaml from .cldm import cldm from .t2i_adapter import adapter @@ -726,7 +726,8 @@ def load_clip(ckpt_path, embedding_directory=None): return clip def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None): - config = OmegaConf.load(config_path) + with open(config_path, 'r') as stream: + config = yaml.safe_load(stream) model_config_params = config['model']['params'] clip_config = model_config_params['cond_stage_config'] scale_factor = model_config_params['scale_factor'] @@ -750,7 +751,7 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e w.cond_stage_model = clip.cond_stage_model load_state_dict_to = [w] - model = instantiate_from_config(config.model) + model = instantiate_from_config(config["model"]) sd = load_torch_file(ckpt_path) model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) return (ModelPatcher(model), clip, vae) diff --git a/requirements.txt b/requirements.txt index 45f2599d9..bc8b3c558 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ torch torchdiffeq torchsde -omegaconf einops open-clip-torch transformers @@ -9,3 +8,4 @@ safetensors pytorch_lightning aiohttp accelerate +pyyaml From 
454bc1c9f6373a00dd40c20692acd29a5affc015 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 15:09:11 -0400 Subject: [PATCH 15/49] Fix small issue with build. --- .ci/setup_windows_zip_nightly_pytorch.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 index 6d13bad93..b4d5633a1 100755 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -11,7 +11,7 @@ ls ../temp_wheel_dir "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth cd .. -mkdir ComfyUI_windows_portable +mkdir ComfyUI_windows_portable_nightly_pytorch mv python_embeded ComfyUI_windows_portable_nightly_pytorch mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI From 0c128cb91dca1a674c174817990b3276ddea1f91 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 13 Mar 2023 19:34:05 +0000 Subject: [PATCH 16/49] Added ability to save images to temp dir --- main.py | 9 +++++++++ nodes.py | 21 ++++++++++++++++++--- server.py | 2 +- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/main.py b/main.py index b2b3f1c40..889e2cef7 100644 --- a/main.py +++ b/main.py @@ -1,5 +1,6 @@ import os import sys +import shutil import threading import asyncio @@ -53,7 +54,14 @@ def hijack_progress(server): return v setattr(tqdm, "update", wrapped_func) +def cleanup_temp(): + temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") + if os.path.exists(temp_dir): + shutil.rmtree(temp_dir) + if __name__ == "__main__": + cleanup_temp() + loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) server = server.PromptServer(loop) @@ -93,3 +101,4 @@ if __name__ == "__main__": else: loop.run_until_complete(run(server, address=address, port=port, verbose=not dont_print, call_on_start=call_on_start)) + cleanup_temp() diff --git a/nodes.py b/nodes.py index 0a0a0a9cd..b201c352c 100644 --- a/nodes.py +++ b/nodes.py @@ -775,12 +775,14 @@ class KSamplerAdvanced: class SaveImage: def __init__(self): self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") + self.temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") @classmethod def INPUT_TYPES(s): return {"required": {"images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"})}, + "filename_prefix": ("STRING", {"default": "ComfyUI"}), + "use_temp_file": (["yes", "no"], {"default" : "no"} ),}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } @@ -791,7 +793,7 @@ class SaveImage: CATEGORY = "image" - def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): + def save_images(self, images, filename_prefix="ComfyUI", use_temp_file="no", prompt=None, extra_pnginfo=None): def map_filename(filename): prefix_len = len(filename_prefix) prefix = filename[:prefix_len + 1] @@ -818,8 +820,21 @@ class SaveImage: if extra_pnginfo is not None: for x in extra_pnginfo: metadata.add_text(x, json.dumps(extra_pnginfo[x])) + file = f"{filename_prefix}_{counter:05}_.png" - img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True) + + if use_temp_file == "yes": + if not os.path.exists(self.temp_dir): + os.makedirs(self.temp_dir) + dir = self.temp_dir + else: + dir = self.output_dir + + img.save(os.path.join(dir, file), pnginfo=metadata, optimize=True) + + if use_temp_file == "yes": + file += "?type=temp" + 
paths.append(file) counter += 1 return { "ui": { "images": paths } } diff --git a/server.py b/server.py index a29d85974..eb6857010 100644 --- a/server.py +++ b/server.py @@ -113,7 +113,7 @@ class PromptServer(): async def view_image(request): if "file" in request.match_info: type = request.rel_url.query.get("type", "output") - if type != "output" and type != "input": + if type not in ["output", "input", "temp"]: return web.Response(status=400) output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), type) From b0911ee5aad319aceafc1092841c3870ee09c0fe Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 13 Mar 2023 19:34:29 +0000 Subject: [PATCH 17/49] Combo support detault value --- web/scripts/app.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index e70e1c157..8b832eba1 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -497,7 +497,11 @@ class ComfyApp { if (Array.isArray(type)) { // Enums e.g. latent rotation - this.addWidget("combo", inputName, type[0], () => {}, { values: type }); + let defaultValue = type[0]; + if (inputData[1] && inputData[1].default) { + defaultValue = inputData[1].default; + } + this.addWidget("combo", inputName, defaultValue, () => {}, { values: type }); } else if (`${type}:${inputName}` in widgets) { // Support custom widgets by Type:Name Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {}); From be5f0fc241afb5eb05e077febd137e67a721210c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 17:50:48 -0400 Subject: [PATCH 18/49] Move colab link to the installing section. --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5c80b10a2..8877d4495 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,10 @@ There is a portable standalone build for Windows that should work for running on Just download, extract and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints +## Colab Notebook + +To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb) + ## Manual Install (Windows, Linux) Git clone this repo. @@ -128,10 +132,6 @@ To use a textual inversion concepts/embeddings in a text prompt put them in the ```embedding:embedding_filename.pt``` -### Colab Notebook - -To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb) - ### Fedora To get python 3.10 on fedora: From edba761868f8b49be1d1136f6b8e77721f3f5b54 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 20:58:09 -0400 Subject: [PATCH 19/49] Use half() function on model when loading in fp16. 
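
A minimal sketch of the idea (simplified; the real change lives in the checkpoint loaders in comfy/sd.py): when the UNet config asks for fp16, convert the loaded module with half() after its weights are in place.

    import torch

    def maybe_half(model: torch.nn.Module, model_config_params: dict) -> torch.nn.Module:
        # Read use_fp16 from the unet_config section if it is present.
        fp16 = model_config_params.get("unet_config", {}).get("params", {}).get("use_fp16", False)
        if fp16:
            # Casts every floating point parameter and buffer to torch.float16.
            model = model.half()
        return model
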
--- comfy/sd.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index 3f5ce24e9..c7e0b073d 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -854,4 +854,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, e model = instantiate_from_config(model_config) model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) + if fp16: + model = model.half() + return (ModelPatcher(model), clip, vae) From 889689c7b2bd69a7a09ce2828fc44b4e69385069 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 21:12:48 -0400 Subject: [PATCH 20/49] use half() on fp16 models loaded with config. --- comfy/sd.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index c7e0b073d..61d1916db 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -733,6 +733,12 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e scale_factor = model_config_params['scale_factor'] vae_config = model_config_params['first_stage_config'] + fp16 = False + if "unet_config" in model_config_params: + if "params" in model_config_params["unet_config"]: + if "use_fp16" in model_config_params["unet_config"]["params"]: + fp16 = model_config_params["unet_config"]["params"]["use_fp16"] + clip = None vae = None @@ -754,6 +760,10 @@ def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, e model = instantiate_from_config(config["model"]) sd = load_torch_file(ckpt_path) model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) + + if fp16: + model = model.half() + return (ModelPatcher(model), clip, vae) From dc8b43e5123af6227b15f0505ee93fe09dde3a94 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Mar 2023 21:30:01 -0400 Subject: [PATCH 21/49] Make --cpu have priority over everything else. 
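
In outline (a simplified sketch assuming the same flag names as comfy/model_management.py, which routes the VRAM flags through set_vram_to first), the fix is just to evaluate --cpu after the VRAM flags so it always wins:

    import sys

    # Same ordering idea as model_management: later checks override earlier ones.
    CPU, NO_VRAM, LOW_VRAM, NORMAL_VRAM, HIGH_VRAM = 0, 1, 2, 3, 4

    def resolve_vram_state(argv=sys.argv):
        state = NORMAL_VRAM
        if "--lowvram" in argv:
            state = LOW_VRAM
        if "--novram" in argv:
            state = NO_VRAM
        if "--highvram" in argv:
            state = HIGH_VRAM
        if "--cpu" in argv:  # checked last, so it takes priority over everything else
            state = CPU
        return state
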
--- comfy/model_management.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 482b1add9..c26d682f7 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -50,8 +50,6 @@ if "--use-pytorch-cross-attention" in sys.argv: XFORMERS_IS_AVAILBLE = False -if "--cpu" in sys.argv: - vram_state = CPU if "--lowvram" in sys.argv: set_vram_to = LOW_VRAM if "--novram" in sys.argv: @@ -73,6 +71,8 @@ if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM: total_vram_available_mb = (total_vram - 1024) // 2 total_vram_available_mb = int(max(256, total_vram_available_mb)) +if "--cpu" in sys.argv: + vram_state = CPU print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM"][vram_state]) From b91f00a1ff78ca9dc6ec0470ff20781c2b92b6e2 Mon Sep 17 00:00:00 2001 From: m957ymj75urz Date: Tue, 14 Mar 2023 08:16:48 +0100 Subject: [PATCH 22/49] toggle extra options --- web/scripts/ui.js | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 1ba95bb40..8134e3415 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -255,25 +255,28 @@ export class ComfyUI { $el("span", { $: (q) => (this.queueSize = q) }), $el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }), ]), - $el("div", { style: { width: "100%" }}, [ + $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0, this.batchCount) }), + $el("div", {}, [ + $el("label", { innerHTML: "Extra options"}, [ + $el("input", { type: "checkbox", onchange: (i) => document.getElementById('extraOptions').style.visibility = i.srcElement.checked ? "visible" : "collapse" }) + ]) + ]), + $el("div", { id: "extraOptions", style: { width: "100%", visibility: "collapse" }}, [ $el("label", { innerHTML: "Batch count" }, [ $el("input", { id: "batchCountInputNumber", type: "number", value: this.batchCount, min: "1", style: { width: "35%", "margin-left": "0.4em" }, oninput: (i) => { this.batchCount = i.target.value; document.getElementById('batchCountInputRange').value = this.batchCount; - console.log("number"); } }), $el("input", { id: "batchCountInputRange", type: "range", min: "1", max: "100", value: this.batchCount, oninput: (i) => { this.batchCount = i.srcElement.value; document.getElementById('batchCountInputNumber').value = i.srcElement.value; - console.log("range"); } }), ]), ]), - $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0, this.batchCount) }), $el("div.comfy-menu-btns", [ $el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }), $el("button", { From 7e94fda991f0601c994271eb916973a09892c638 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 13:39:58 -0400 Subject: [PATCH 23/49] Update standalone readme with recommended way to update. 
--- .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt | 6 +++++- .ci/windows_base_files/README_VERY_IMPORTANT.txt | 7 +++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt index 3c73a27ac..4c6a20a7a 100755 --- a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -18,5 +18,9 @@ You can download the stable diffusion 1.5 one from: https://huggingface.co/runwa +RECOMMENDED WAY TO UPDATE: To update only the ComfyUI code: update\update_comfyui_only.bat -To update ComfyUI with the python dependencies: update\update_all.bat + + + +To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index 3c73a27ac..69520db98 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -17,6 +17,9 @@ IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: Comfy You can download the stable diffusion 1.5 one from: https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt - +RECOMMENDED WAY TO UPDATE: To update only the ComfyUI code: update\update_comfyui_only.bat -To update ComfyUI with the python dependencies: update\update_all.bat + + + +To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat From e8ef9a9210eed833465aea6df4e8917007ed2cf3 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 19:28:07 +0000 Subject: [PATCH 24/49] Changed flag to new node --- nodes.py | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/nodes.py b/nodes.py index b201c352c..b5aa3efe0 100644 --- a/nodes.py +++ b/nodes.py @@ -775,14 +775,13 @@ class KSamplerAdvanced: class SaveImage: def __init__(self): self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") - self.temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") + self.url_suffix = "" @classmethod def INPUT_TYPES(s): return {"required": {"images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"}), - "use_temp_file": (["yes", "no"], {"default" : "no"} ),}, + "filename_prefix": ("STRING", {"default": "ComfyUI"}),}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } @@ -810,6 +809,9 @@ class SaveImage: os.mkdir(self.output_dir) counter = 1 + if not os.path.exists(self.output_dir): + os.makedirs(self.output_dir) + paths = list() for image in images: i = 255. 
* image.cpu().numpy() @@ -820,25 +822,24 @@ class SaveImage: if extra_pnginfo is not None: for x in extra_pnginfo: metadata.add_text(x, json.dumps(extra_pnginfo[x])) - file = f"{filename_prefix}_{counter:05}_.png" - - if use_temp_file == "yes": - if not os.path.exists(self.temp_dir): - os.makedirs(self.temp_dir) - dir = self.temp_dir - else: - dir = self.output_dir - - img.save(os.path.join(dir, file), pnginfo=metadata, optimize=True) - - if use_temp_file == "yes": - file += "?type=temp" - - paths.append(file) + img.save(os.path.join(self.output_dir, file), pnginfo=metadata, optimize=True) + paths.append(file + self.url_suffix) counter += 1 return { "ui": { "images": paths } } +class PreviewImage(SaveImage): + def __init__(self): + self.output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") + self.url_suffix = "?type=temp" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + class LoadImage: input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") @classmethod @@ -959,6 +960,7 @@ NODE_CLASS_MAPPINGS = { "EmptyLatentImage": EmptyLatentImage, "LatentUpscale": LatentUpscale, "SaveImage": SaveImage, + "PreviewImage": PreviewImage, "LoadImage": LoadImage, "LoadImageMask": LoadImageMask, "ImageScale": ImageScale, From c43394036a62475fce24887b8e9620b1393a6b92 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 19:39:49 +0000 Subject: [PATCH 25/49] Better auto pos of images --- web/scripts/app.js | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 8b832eba1..445bc5d4b 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -142,7 +142,14 @@ class ComfyApp { if (numImages === 1 && !imageIndex) { this.imageIndex = imageIndex = 0; } - let shiftY = this.type === "SaveImage" ? 
55 : this.imageOffset || 0; + + let shiftY; + if (this.imageOffset != null) { + shiftY = this.imageOffset; + } else { + shiftY = this.computeSize()[1]; + } + let dw = this.size[0]; let dh = this.size[1]; dh -= shiftY; From dabe6816b7f6d55cc685e4a52eac42779e50a73b Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 19:42:28 +0000 Subject: [PATCH 26/49] tidy --- nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index b5aa3efe0..650d7f65d 100644 --- a/nodes.py +++ b/nodes.py @@ -781,7 +781,7 @@ class SaveImage: def INPUT_TYPES(s): return {"required": {"images": ("IMAGE", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"}),}, + "filename_prefix": ("STRING", {"default": "ComfyUI"})}, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } @@ -792,7 +792,7 @@ class SaveImage: CATEGORY = "image" - def save_images(self, images, filename_prefix="ComfyUI", use_temp_file="no", prompt=None, extra_pnginfo=None): + def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): def map_filename(filename): prefix_len = len(filename_prefix) prefix = filename[:prefix_len + 1] From 69df145b845761914c5d9dc805ebe583d4f910e1 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 20:29:18 +0000 Subject: [PATCH 27/49] Added ctrl+enter to queue prompt --- web/scripts/app.js | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index e70e1c157..6703908da 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -398,6 +398,15 @@ class ComfyApp { api.init(); } + #addKeyboardHandler() { + window.addEventListener("keydown", (e) => { + // Queue prompt using ctrl or command + enter + if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) { + this.queuePrompt(0); + } + }); + } + /** * Loads all extensions from the API into the window */ @@ -464,6 +473,7 @@ class ComfyApp { this.#addApiUpdateHandlers(); this.#addDropHandler(); this.#addPasteHandler(); + this.#addKeyboardHandler(); await this.#invokeExtensionsAsync("setup"); } From 4ba97adb5cd7c1c56ae8f16029392257fff511e5 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 20:31:27 +0000 Subject: [PATCH 28/49] Add ctrl+shift+enter for queue front --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 6703908da..942fc9028 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -402,7 +402,7 @@ class ComfyApp { window.addEventListener("keydown", (e) => { // Queue prompt using ctrl or command + enter if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) { - this.queuePrompt(0); + this.queuePrompt(e.shiftKey ? 
-1 : 0); } }); } From 1f32af02107b28853ef992d6a22b0ab5343d7903 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 21:13:29 +0000 Subject: [PATCH 29/49] Add dragdrop handling to nodes with upload widget --- web/scripts/app.js | 51 +++++++++++++++++++++----- web/scripts/widgets.js | 81 +++++++++++++++++++++++++++++------------- 2 files changed, 99 insertions(+), 33 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index e70e1c157..80a4eb35f 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -284,9 +284,37 @@ class ComfyApp { document.addEventListener("drop", async (event) => { event.preventDefault(); event.stopPropagation(); - const file = event.dataTransfer.files[0]; - await this.handleFile(file); + + const n = this.dragOverNode; + this.dragOverNode = null; + // Node handles file drop, we dont use the built in onDropFile handler as its buggy + // If you drag multiple files it will call it multiple times with the same file + if (n && n.onDragDrop && await n.onDragDrop(event)) { + return; + } + + await this.handleFile(event.dataTransfer.files[0]); }); + + // Add handler for dropping onto a specific node + this.canvasEl.addEventListener( + "dragover", + (e) => { + this.canvas.adjustMouseEvent(e); + const node = this.graph.getNodeOnPos(e.canvasX, e.canvasY); + if (node) { + if (node.onDragOver && node.onDragOver(e)) { + this.dragOverNode = node; + requestAnimationFrame(() => { + this.graph.setDirtyCanvas(false, true); + }); + return; + } + } + this.dragOverNode = null; + }, + false + ); } /** @@ -314,15 +342,22 @@ class ComfyApp { } /** - * Draws currently executing node highlight and progress bar + * Draws currently node highlights and progress bar */ - #addDrawNodeProgressHandler() { + #addDrawNodeHandler() { const orig = LGraphCanvas.prototype.drawNodeShape; const self = this; LGraphCanvas.prototype.drawNodeShape = function (node, ctx, size, fgcolor, bgcolor, selected, mouse_over) { const res = orig.apply(this, arguments); - if (node.id + "" === self.runningNodeId) { + let color = null; + if (node.id === +self.runningNodeId) { + color = "#0f0"; + } else if (self.dragOverNode && node.id === self.dragOverNode.id) { + color = "dodgerblue"; + } + + if (color) { const shape = node._shape || node.constructor.shape || LiteGraph.ROUND_SHAPE; ctx.lineWidth = 1; ctx.globalAlpha = 0.8; @@ -348,7 +383,7 @@ class ComfyApp { ); else if (shape == LiteGraph.CIRCLE_SHAPE) ctx.arc(size[0] * 0.5, size[1] * 0.5, size[0] * 0.5 + 6, 0, Math.PI * 2); - ctx.strokeStyle = "#0f0"; + ctx.strokeStyle = color; ctx.stroke(); ctx.strokeStyle = fgcolor; ctx.globalAlpha = 1; @@ -419,7 +454,7 @@ class ComfyApp { await this.#loadExtensions(); // Create and mount the LiteGraph in the DOM - const canvasEl = Object.assign(document.createElement("canvas"), { id: "graph-canvas" }); + const canvasEl = (this.canvasEl = Object.assign(document.createElement("canvas"), { id: "graph-canvas" })); document.body.prepend(canvasEl); this.graph = new LGraph(); @@ -460,7 +495,7 @@ class ComfyApp { // Save current workflow automatically setInterval(() => localStorage.setItem("workflow", JSON.stringify(this.graph.serialize())), 1000); - this.#addDrawNodeProgressHandler(); + this.#addDrawNodeHandler(); this.#addApiUpdateHandlers(); this.#addDropHandler(); this.#addPasteHandler(); diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 55bdd8f18..3c4641964 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -132,7 +132,7 @@ 
export const ComfyWidgets = { function showImage(name) { // Position the image somewhere sensible - if(!node.imageOffset) { + if (!node.imageOffset) { node.imageOffset = uploadWidget.last_y ? uploadWidget.last_y + 25 : 75; } @@ -162,6 +162,36 @@ export const ComfyWidgets = { } }); + async function uploadFile(file, updateNode) { + try { + // Wrap file in formdata so it includes filename + const body = new FormData(); + body.append("image", file); + const resp = await fetch("/upload/image", { + method: "POST", + body, + }); + + if (resp.status === 200) { + const data = await resp.json(); + // Add the file as an option and update the widget value + if (!imageWidget.options.values.includes(data.name)) { + imageWidget.options.values.push(data.name); + } + + if (updateNode) { + showImage(data.name); + + imageWidget.value = data.name; + } + } else { + alert(resp.status + " - " + resp.statusText); + } + } catch (error) { + alert(error); + } + } + const fileInput = document.createElement("input"); Object.assign(fileInput, { type: "file", @@ -169,30 +199,7 @@ export const ComfyWidgets = { style: "display: none", onchange: async () => { if (fileInput.files.length) { - try { - // Wrap file in formdata so it includes filename - const body = new FormData(); - body.append("image", fileInput.files[0]); - const resp = await fetch("/upload/image", { - method: "POST", - body, - }); - - if (resp.status === 200) { - const data = await resp.json(); - showImage(data.name); - - // Add the file as an option and update the widget value - if (!imageWidget.options.values.includes(data.name)) { - imageWidget.options.values.push(data.name); - } - imageWidget.value = data.name; - } else { - alert(resp.status + " - " + resp.statusText); - } - } catch (error) { - alert(error); - } + await uploadFile(fileInput.files[0], true); } }, }); @@ -204,6 +211,30 @@ export const ComfyWidgets = { }); uploadWidget.serialize = false; + // Add handler to check if an image is being dragged over our node + node.onDragOver = function (e) { + if (e.dataTransfer && e.dataTransfer.items) { + const image = [...e.dataTransfer.items].find((f) => f.kind === "file" && f.type.startsWith("image/")); + return !!image; + } + + return false; + }; + + // On drop upload files + node.onDragDrop = function (e) { + console.log("onDragDrop called"); + let handled = false; + for (const file of e.dataTransfer.files) { + if (file.type.startsWith("image/")) { + uploadFile(file, !handled); // Dont await these, any order is fine, only update on first one + handled = true; + } + } + + return handled; + }; + return { widget: uploadWidget }; }, }; From 7a1d5b908022e0e1dd87e7cf176c0c8facdd835a Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 21:21:50 +0000 Subject: [PATCH 30/49] Add dragleave handler to remove stuck highlight --- web/scripts/app.js | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 80a4eb35f..257d8524f 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -289,13 +289,21 @@ class ComfyApp { this.dragOverNode = null; // Node handles file drop, we dont use the built in onDropFile handler as its buggy // If you drag multiple files it will call it multiple times with the same file - if (n && n.onDragDrop && await n.onDragDrop(event)) { + if (n && n.onDragDrop && (await n.onDragDrop(event))) { return; } await this.handleFile(event.dataTransfer.files[0]); }); + // Always clear over node on drag leave + 
this.canvasEl.addEventListener("dragleave", async () => { + if (this.dragOverNode) { + this.dragOverNode = null; + this.graph.setDirtyCanvas(false, true); + } + }); + // Add handler for dropping onto a specific node this.canvasEl.addEventListener( "dragover", From b4b26310b8588c3efcc4d98b8ae7297151c0f9d1 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 21:22:47 +0000 Subject: [PATCH 31/49] Updated comment --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 257d8524f..4d757adb2 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -350,7 +350,7 @@ class ComfyApp { } /** - * Draws currently node highlights and progress bar + * Draws node highlights (executing, drag drop) and progress bar */ #addDrawNodeHandler() { const orig = LGraphCanvas.prototype.drawNodeShape; From f430fdfb67f34a9c4717d85e9e975ca1033ff9e4 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 14 Mar 2023 21:25:52 +0000 Subject: [PATCH 32/49] Explain why animation frame used --- web/scripts/app.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index 4d757adb2..8b761b9bc 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -313,6 +313,8 @@ class ComfyApp { if (node) { if (node.onDragOver && node.onDragOver(e)) { this.dragOverNode = node; + + // dragover event is fired very frequently, run this on an animation frame requestAnimationFrame(() => { this.graph.setDirtyCanvas(false, true); }); From 2f9750b7b946a9911c3fba67dc0dcf74bb5f7f14 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 18:07:09 -0400 Subject: [PATCH 33/49] Make sure windows permission issues don't mess things up. --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 889e2cef7..a7f6541fb 100644 --- a/main.py +++ b/main.py @@ -57,7 +57,7 @@ def hijack_progress(server): def cleanup_temp(): temp_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") if os.path.exists(temp_dir): - shutil.rmtree(temp_dir) + shutil.rmtree(temp_dir, ignore_errors=True) if __name__ == "__main__": cleanup_temp() From 30be467b41e20262c9a7851ee592be9017de08c3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 19:08:23 -0400 Subject: [PATCH 34/49] Make it more clear the recommended way to update the standalone build. 
--- .../update_comfyui_and_python_dependencies.bat | 3 +++ .ci/nightly/update_windows/update_all.bat | 3 --- .../{update_comfyui_only.bat => update_comfyui.bat} | 0 .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt | 5 +++-- .ci/setup_windows_zip.ps1 | 4 ++-- .ci/setup_windows_zip_nightly_pytorch.ps1 | 4 ++-- .../update_comfyui_and_python_dependencies.bat | 3 +++ .ci/update_windows/update_all.bat | 3 --- .../{update_comfyui_only.bat => update_comfyui.bat} | 0 .ci/windows_base_files/README_VERY_IMPORTANT.txt | 6 ++++-- nodes.py | 6 +++--- 11 files changed, 20 insertions(+), 17 deletions(-) create mode 100755 .ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat delete mode 100755 .ci/nightly/update_windows/update_all.bat rename .ci/nightly/update_windows/{update_comfyui_only.bat => update_comfyui.bat} (100%) create mode 100755 .ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat delete mode 100755 .ci/update_windows/update_all.bat rename .ci/update_windows/{update_comfyui_only.bat => update_comfyui.bat} (100%) diff --git a/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat new file mode 100755 index 000000000..d58e3341e --- /dev/null +++ b/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\..\python_embeded\python.exe ..\update.py ..\..\ComfyUI\ +..\..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/nightly/update_windows/update_all.bat b/.ci/nightly/update_windows/update_all.bat deleted file mode 100755 index c5e0c6be7..000000000 --- a/.ci/nightly/update_windows/update_all.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\python_embeded\python.exe .\update.py ..\ComfyUI\ -..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/nightly/update_windows/update_comfyui_only.bat b/.ci/nightly/update_windows/update_comfyui.bat similarity index 100% rename from .ci/nightly/update_windows/update_comfyui_only.bat rename to .ci/nightly/update_windows/update_comfyui.bat diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt index 4c6a20a7a..7066f91bf 100755 --- a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -19,8 +19,9 @@ You can download the stable diffusion 1.5 one from: https://huggingface.co/runwa RECOMMENDED WAY TO UPDATE: -To update only the ComfyUI code: update\update_comfyui_only.bat +To update the ComfyUI code: update\update_comfyui.bat -To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat +To update ComfyUI with the python dependencies: +update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat diff --git a/.ci/setup_windows_zip.ps1 b/.ci/setup_windows_zip.ps1 index 4bd2f0b4a..6b38f498b 100755 --- a/.ci/setup_windows_zip.ps1 +++ b/.ci/setup_windows_zip.ps1 @@ -16,8 +16,8 @@ mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI cd ComfyUI_windows_portable 
mkdir update -cp ComfyUI/.ci/update_windows/* ./update/ -cp ComfyUI/.ci/windows_base_files/* ./ +cp -r ComfyUI/.ci/update_windows/* ./update/ +cp -r ComfyUI/.ci/windows_base_files/* ./ cd .. diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 index b4d5633a1..31721e5f6 100755 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ b/.ci/setup_windows_zip_nightly_pytorch.ps1 @@ -18,8 +18,8 @@ mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI cd ComfyUI_windows_portable_nightly_pytorch mkdir update -cp ComfyUI/.ci/nightly/update_windows/* ./update/ -cp ComfyUI/.ci/nightly/windows_base_files/* ./ +cp -r ComfyUI/.ci/nightly/update_windows/* ./update/ +cp -r ComfyUI/.ci/nightly/windows_base_files/* ./ cd .. diff --git a/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat new file mode 100755 index 000000000..514621935 --- /dev/null +++ b/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\..\python_embeded\python.exe ..\update.py ..\..\ComfyUI\ +..\..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/update_windows/update_all.bat b/.ci/update_windows/update_all.bat deleted file mode 100755 index b7308550d..000000000 --- a/.ci/update_windows/update_all.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\python_embeded\python.exe .\update.py ..\ComfyUI\ -..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/update_windows/update_comfyui_only.bat b/.ci/update_windows/update_comfyui.bat similarity index 100% rename from .ci/update_windows/update_comfyui_only.bat rename to .ci/update_windows/update_comfyui.bat diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index 69520db98..143ee462f 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -18,8 +18,10 @@ You can download the stable diffusion 1.5 one from: https://huggingface.co/runwa RECOMMENDED WAY TO UPDATE: -To update only the ComfyUI code: update\update_comfyui_only.bat +To update the ComfyUI code: update\update_comfyui.bat -To update ComfyUI with the python dependencies (ONLY USE IF YOU NEED TO UPDATE THE PYTHON PACKAGES): update\update_all.bat +To update ComfyUI with the python dependencies: +update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat + diff --git a/nodes.py b/nodes.py index 650d7f65d..f956eaa6e 100644 --- a/nodes.py +++ b/nodes.py @@ -811,7 +811,7 @@ class SaveImage: if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) - + paths = list() for image in images: i = 255. 
* image.cpu().numpy() @@ -835,11 +835,11 @@ class PreviewImage(SaveImage): @classmethod def INPUT_TYPES(s): - return {"required": + return {"required": {"images": ("IMAGE", ), }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, } - + class LoadImage: input_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") @classmethod From 10cdddec3e6f4ef887b09328e3642d5089f6ab50 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 21:38:59 -0400 Subject: [PATCH 35/49] Don't need to be that explicit. --- .../update_comfyui_and_python_dependencies.bat | 3 --- .../update_windows/update_comfyui_and_python_dependencies.bat | 3 +++ .ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt | 2 +- .../update_comfyui_and_python_dependencies.bat | 3 --- .ci/update_windows/update_comfyui_and_python_dependencies.bat | 3 +++ .ci/windows_base_files/README_VERY_IMPORTANT.txt | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) delete mode 100755 .ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat create mode 100755 .ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat delete mode 100755 .ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat create mode 100755 .ci/update_windows/update_comfyui_and_python_dependencies.bat diff --git a/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat deleted file mode 100755 index d58e3341e..000000000 --- a/.ci/nightly/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\..\python_embeded\python.exe ..\update.py ..\..\ComfyUI\ -..\..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat new file mode 100755 index 000000000..c5e0c6be7 --- /dev/null +++ b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt index 7066f91bf..656b9db43 100755 --- a/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/nightly/windows_base_files/README_VERY_IMPORTANT.txt @@ -24,4 +24,4 @@ To update the ComfyUI code: update\update_comfyui.bat To update ComfyUI with the python dependencies: -update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat +update\update_comfyui_and_python_dependencies.bat diff --git a/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat b/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat deleted file mode 100755 index 514621935..000000000 --- a/.ci/update_windows/ONLY_RUN_THIS_IF_YOU_HAVE_TO/update_comfyui_and_python_dependencies.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\..\python_embeded\python.exe ..\update.py 
..\..\ComfyUI\ -..\..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/update_windows/update_comfyui_and_python_dependencies.bat b/.ci/update_windows/update_comfyui_and_python_dependencies.bat new file mode 100755 index 000000000..b7308550d --- /dev/null +++ b/.ci/update_windows/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index 143ee462f..a6214e735 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -23,5 +23,5 @@ To update the ComfyUI code: update\update_comfyui.bat To update ComfyUI with the python dependencies: -update\ONLY_RUN_THIS_IF_YOU_HAVE_TO\update_comfyui_and_python_dependencies.bat +update\update_comfyui_and_python_dependencies.bat From f5c4ffed0549f537d348f20cc1ec936b9b5ba78c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 21:46:59 -0400 Subject: [PATCH 36/49] Test workflow for cu118 test build. --- .github/workflows/windows_release_cu118.yml | 61 +++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 .github/workflows/windows_release_cu118.yml diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml new file mode 100644 index 000000000..05a818419 --- /dev/null +++ b/.github/workflows/windows_release_cu118.yml @@ -0,0 +1,61 @@ +name: "Windows Release cu118" + +on: + workflow_dispatch: +# push: +# branches: +# - master + +jobs: + build: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - run: | + cd .. + cp ComfyUI/.ci/setup_windows_zip.ps1 ./ + cp -r ComfyUI ComfyUI_copy + + Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + Expand-Archive python_embeded.zip + cd python_embeded + Add-Content -Path .\python310._pth -Value 'import site' + Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py + .\python.exe get-pip.py + .\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 + "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth + cd .. + + + mkdir ComfyUI_windows_portable + mv python_embeded ComfyUI_windows_portable + mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI + + cd ComfyUI_windows_portable + + mkdir update + cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/windows_base_files/* ./ + + cd .. 
+ + & "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable + mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z + + ls + + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z + tag: "latest" + overwrite: true + From 0ad3be29afad5e5b824142cee0634dd33bdd22de Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 22:09:15 -0400 Subject: [PATCH 37/49] Try to make the workflow actually fail when there's a problem. --- .github/workflows/windows_release_cu118.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index 05a818419..65f0d29bb 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -20,7 +20,6 @@ jobs: - run: | cd .. - cp ComfyUI/.ci/setup_windows_zip.ps1 ./ cp -r ComfyUI ComfyUI_copy Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip @@ -29,8 +28,13 @@ jobs: Add-Content -Path .\python310._pth -Value 'import site' Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py .\python.exe get-pip.py - .\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth + + - shell: bash + run: | + cd .. + cd python_embeded + ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 cd .. @@ -46,7 +50,7 @@ jobs: cd .. - & "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z ls From b27692ad5f5b196c286fa77899c84dd9c99463b0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 23:02:57 -0400 Subject: [PATCH 38/49] Do a quick test on the CI to see if ComfyUI actually runs before pushing the build. --- .github/workflows/windows_release_cu118.yml | 22 ++++++++++----------- main.py | 3 +++ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index 65f0d29bb..b757a540e 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -18,23 +18,18 @@ jobs: with: fetch-depth: 0 - - run: | - cd .. - cp -r ComfyUI ComfyUI_copy - - Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip - Expand-Archive python_embeded.zip - cd python_embeded - Add-Content -Path .\python310._pth -Value 'import site' - Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py - .\python.exe get-pip.py - "../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth - - shell: bash run: | cd .. 
+ cp -r ComfyUI ComfyUI_copy + wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + unzip python_embeded.zip cd python_embeded + echo 'import site' >> ./python310._pth + wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + ./python.exe get-pip.py ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 + sed -i '1i../ComfyUI' ./python310._pth cd .. @@ -53,6 +48,9 @@ jobs: "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z + cd ComfyUI_windows_portable + python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + ls - name: Upload binaries to release diff --git a/main.py b/main.py index a7f6541fb..3c03381d6 100644 --- a/main.py +++ b/main.py @@ -86,6 +86,9 @@ if __name__ == "__main__": except: pass + if '--quick-test-for-ci' in sys.argv: + exit(0) + call_on_start = None if "--windows-standalone-build" in sys.argv: def startup_server(address, port): From 7f25155a74d77083375da5597df31e535453fb2f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 23:17:50 -0400 Subject: [PATCH 39/49] More proper ci workflows. --- .ci/setup_windows_zip.ps1 | 25 ------------- .ci/setup_windows_zip_nightly_pytorch.ps1 | 27 -------------- .github/workflows/windows_release.yml | 35 ++++++++++++++++-- .github/workflows/windows_release_cu118.yml | 2 +- .../windows_release_nightly_pytorch.yml | 36 +++++++++++++++++-- 5 files changed, 66 insertions(+), 59 deletions(-) delete mode 100755 .ci/setup_windows_zip.ps1 delete mode 100755 .ci/setup_windows_zip_nightly_pytorch.ps1 diff --git a/.ci/setup_windows_zip.ps1 b/.ci/setup_windows_zip.ps1 deleted file mode 100755 index 6b38f498b..000000000 --- a/.ci/setup_windows_zip.ps1 +++ /dev/null @@ -1,25 +0,0 @@ -Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip -Expand-Archive python_embeded.zip -cd python_embeded -Add-Content -Path .\python310._pth -Value 'import site' -Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py -.\python.exe get-pip.py -.\python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 -"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth -cd .. - - -mkdir ComfyUI_windows_portable -mv python_embeded ComfyUI_windows_portable -mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI - -cd ComfyUI_windows_portable - -mkdir update -cp -r ComfyUI/.ci/update_windows/* ./update/ -cp -r ComfyUI/.ci/windows_base_files/* ./ - -cd .. 
- -& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable -mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z diff --git a/.ci/setup_windows_zip_nightly_pytorch.ps1 b/.ci/setup_windows_zip_nightly_pytorch.ps1 deleted file mode 100755 index 31721e5f6..000000000 --- a/.ci/setup_windows_zip_nightly_pytorch.ps1 +++ /dev/null @@ -1,27 +0,0 @@ -Invoke-WebRequest -URI https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip -Expand-Archive python_embeded.zip -rm python_embeded.zip -cd python_embeded -Add-Content -Path .\python310._pth -Value 'import site' -Invoke-WebRequest -Uri https://bootstrap.pypa.io/get-pip.py -OutFile get-pip.py -.\python.exe get-pip.py -python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir -ls ../temp_wheel_dir -.\python.exe -s -m pip install --pre (get-item ..\temp_wheel_dir\*) -"../ComfyUI`n" + (Get-Content .\python310._pth -Raw) | Set-Content .\python310._pth -cd .. - -mkdir ComfyUI_windows_portable_nightly_pytorch -mv python_embeded ComfyUI_windows_portable_nightly_pytorch -mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI - -cd ComfyUI_windows_portable_nightly_pytorch - -mkdir update -cp -r ComfyUI/.ci/nightly/update_windows/* ./update/ -cp -r ComfyUI/.ci/nightly/windows_base_files/* ./ - -cd .. - -& "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch -mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index be48b7eae..1b2694a35 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -18,13 +18,42 @@ jobs: with: fetch-depth: 0 - - run: | + - shell: bash + run: | cd .. - cp ComfyUI/.ci/setup_windows_zip.ps1 ./ cp -r ComfyUI ComfyUI_copy - .\setup_windows_zip.ps1 + wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + unzip python_embeded.zip -d python_embeded + cd python_embeded + echo 'import site' >> ./python310._pth + wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + ./python.exe get-pip.py + ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 + sed -i '1i../ComfyUI' ./python310._pth + cd .. + + + mkdir ComfyUI_windows_portable + mv python_embeded ComfyUI_windows_portable + mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI + + cd ComfyUI_windows_portable + + mkdir update + cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/windows_base_files/* ./ + + cd .. 
+ + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable + mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu.7z + + cd ComfyUI_windows_portable + python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + ls + - name: Upload binaries to release uses: svenstaro/upload-release-action@v2 with: diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index b757a540e..773483f6c 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -23,7 +23,7 @@ jobs: cd .. cp -r ComfyUI ComfyUI_copy wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip - unzip python_embeded.zip + unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 1aeaef45a..2679b0b6f 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -20,11 +20,41 @@ jobs: - uses: actions/setup-python@v4 with: python-version: '3.10.9' - - run: | + - shell: bash + run: | cd .. - cp ComfyUI/.ci/setup_windows_zip_nightly_pytorch.ps1 ./ cp -r ComfyUI ComfyUI_copy - .\setup_windows_zip_nightly_pytorch.ps1 + wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + unzip python_embeded.zip -d python_embeded + cd python_embeded + echo 'import site' >> ./python310._pth + wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + ./python.exe get-pip.py + python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir + ls ../temp_wheel_dir + ./python.exe -s -m pip install --pre ../temp_wheel_dir/* + sed -i '1i../ComfyUI' ./python310._pth + cd .. + + + mkdir ComfyUI_windows_portable_nightly_pytorch + mv python_embeded ComfyUI_windows_portable_nightly_pytorch + mv ComfyUI_copy ComfyUI_windows_portable_nightly_pytorch/ComfyUI + + cd ComfyUI_windows_portable_nightly_pytorch + + mkdir update + cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/windows_base_files/* ./ + + cd .. + + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch + mv ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI/ComfyUI_windows_portable_nvidia_or_cpu_nightly_pytorch.7z + + cd ComfyUI_windows_portable_nightly_pytorch + python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + ls - name: Upload binaries to release From 4058b8884097af2c5ae615a88cdacc2c53b10087 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Mar 2023 23:22:41 -0400 Subject: [PATCH 40/49] CI fix. --- .github/workflows/windows_release.yml | 4 ++-- .github/workflows/windows_release_cu118.yml | 4 ++-- .github/workflows/windows_release_nightly_pytorch.yml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/windows_release.yml b/.github/workflows/windows_release.yml index 1b2694a35..3f7d4d739 100644 --- a/.github/workflows/windows_release.yml +++ b/.github/workflows/windows_release.yml @@ -22,11 +22,11 @@ jobs: run: | cd .. 
cp -r ComfyUI ComfyUI_copy - wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth - wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers -r ../ComfyUI/requirements.txt pygit2 sed -i '1i../ComfyUI' ./python310._pth diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index 773483f6c..cd0ca9a62 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -22,11 +22,11 @@ jobs: run: | cd .. cp -r ComfyUI ComfyUI_copy - wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth - wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 sed -i '1i../ComfyUI' ./python310._pth diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 2679b0b6f..291d754e3 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -24,11 +24,11 @@ jobs: run: | cd .. cp -r ComfyUI ComfyUI_copy - wget https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -O python_embeded.zip + curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python310._pth - wget https://bootstrap.pypa.io/get-pip.py -O get-pip.py + curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu118 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir ls ../temp_wheel_dir From d4e16ef5100abb7a41c9d6dd317ef1ef45331137 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 15 Mar 2023 01:58:27 -0400 Subject: [PATCH 41/49] Update install instructions for torch. 
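
After installing one of the wheels the README now recommends, a quick sanity check (illustrative only, not part of the repository) is to confirm the GPU build was actually picked up:

```python
import torch

print(torch.__version__)          # wheels from the cu118 / rocm5.4.2 indexes typically report a matching local version suffix
print(torch.cuda.is_available())  # True when the GPU build can see a device (ROCm builds expose the same API)
```

If this prints False, the CPU-only wheel was most likely installed and the pip command should be re-run with the extra index URL from the README.
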
--- README.md | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 8877d4495..b5a0c48ff 100644 --- a/README.md +++ b/README.md @@ -56,19 +56,14 @@ At the time of writing this pytorch has issues with python versions higher than ### AMD (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.2``` - - -I highly recommend you use the nightly/unstable pytorch builds though because they work a lot better for me (run this in the ComfyUI folder so it picks up the requirements.txt): - -```pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/rocm5.4.2 -r requirements.txt``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2``` ### NVIDIA Nvidia users should install torch using this command: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118``` Nvidia users should also install Xformers for a speed boost but can still run the software without it. From 6b33c39fa8cbcaad3887f77a6886ec555c8d114a Mon Sep 17 00:00:00 2001 From: m957ymj75urz Date: Wed, 15 Mar 2023 11:44:49 +0100 Subject: [PATCH 42/49] ignore/restore custom batch count when extra options is unchecked --- web/scripts/ui.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 8134e3415..cb31f10e9 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -258,7 +258,12 @@ export class ComfyUI { $el("button.comfy-queue-btn", { textContent: "Queue Prompt", onclick: () => app.queuePrompt(0, this.batchCount) }), $el("div", {}, [ $el("label", { innerHTML: "Extra options"}, [ - $el("input", { type: "checkbox", onchange: (i) => document.getElementById('extraOptions').style.visibility = i.srcElement.checked ? "visible" : "collapse" }) + $el("input", { type: "checkbox", + onchange: (i) => { + document.getElementById('extraOptions').style.visibility = i.srcElement.checked ? "visible" : "collapse"; + this.batchCount = i.srcElement.checked ? document.getElementById('batchCountInputRange').value : 1; + } + }) ]) ]), $el("div", { id: "extraOptions", style: { width: "100%", visibility: "collapse" }}, [ From 680bf0bb9e74a272ae585f9d871a733b3d1a40aa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 15 Mar 2023 14:32:52 -0400 Subject: [PATCH 43/49] I think it looks a bit better like this. --- web/scripts/ui.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index cb31f10e9..c056371e1 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -260,13 +260,13 @@ export class ComfyUI { $el("label", { innerHTML: "Extra options"}, [ $el("input", { type: "checkbox", onchange: (i) => { - document.getElementById('extraOptions').style.visibility = i.srcElement.checked ? "visible" : "collapse"; + document.getElementById('extraOptions').style.display = i.srcElement.checked ? "block" : "none"; this.batchCount = i.srcElement.checked ? 
document.getElementById('batchCountInputRange').value : 1; } }) ]) ]), - $el("div", { id: "extraOptions", style: { width: "100%", visibility: "collapse" }}, [ + $el("div", { id: "extraOptions", style: { width: "100%", display: "none" }}, [ $el("label", { innerHTML: "Batch count" }, [ $el("input", { id: "batchCountInputNumber", type: "number", value: this.batchCount, min: "1", style: { width: "35%", "margin-left": "0.4em" }, oninput: (i) => { From a2136acb6ca721cba2521d46ce3dbfb069fb4f16 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 15 Mar 2023 15:18:18 -0400 Subject: [PATCH 44/49] Prevent model_management from being loaded twice. --- comfy_extras/nodes_upscale_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_upscale_model.py b/comfy_extras/nodes_upscale_model.py index 98e9863e1..965e5e7a6 100644 --- a/comfy_extras/nodes_upscale_model.py +++ b/comfy_extras/nodes_upscale_model.py @@ -1,7 +1,7 @@ import os from comfy_extras.chainner_models import model_loading from comfy.sd import load_torch_file -import comfy.model_management +import model_management from nodes import filter_files_extensions, recursive_search, supported_ckpt_extensions import torch import comfy.utils @@ -38,7 +38,7 @@ class ImageUpscaleWithModel: CATEGORY = "image/upscaling" def upscale(self, upscale_model, image): - device = comfy.model_management.get_torch_device() + device = model_management.get_torch_device() upscale_model.to(device) in_img = image.movedim(-1,-3).to(device) s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=128 + 64, tile_y=128 + 64, overlap = 8, upscale_amount=upscale_model.scale) From 46d01dd0496feb7a47b530f31ee46dfc6c4b231f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 15 Mar 2023 17:58:13 -0400 Subject: [PATCH 45/49] Fix bug when applying controlnet to negative prompt. --- nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index f956eaa6e..766be75dc 100644 --- a/nodes.py +++ b/nodes.py @@ -691,8 +691,8 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, if t.shape[0] < noise.shape[0]: t = torch.cat([t] * noise.shape[0]) t = t.to(device) - if 'control' in p[1]: - control_nets += [p[1]['control']] + if 'control' in n[1]: + control_nets += [n[1]['control']] negative_copy += [[t] + n[1:]] control_net_models = [] From f0d99d065096dbb8a315863915f9758f927ffb83 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Mar 2023 01:59:53 -0400 Subject: [PATCH 46/49] Put a command that actually works for installing xformers for nvidia. --- README.md | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index b5a0c48ff..277d5d997 100644 --- a/README.md +++ b/README.md @@ -61,13 +61,9 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins ### NVIDIA -Nvidia users should install torch using this command: +Nvidia users should install torch and xformers using this command: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118``` - -Nvidia users should also install Xformers for a speed boost but can still run the software without it. 
- -```pip install xformers``` +```pip install torch==1.13.1 torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers``` #### Troubleshooting From b35b11be9d9a3473bfff24e31553b12744e9d567 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Mar 2023 02:36:42 -0400 Subject: [PATCH 47/49] Test github workflow for standalone build with cu118 torch+xformers. --- ...update_comfyui_and_python_dependencies.bat | 3 + .../README_VERY_IMPORTANT.txt | 2 +- .github/workflows/windows_release_cu118.yml | 84 ++++++++++++++++++- 3 files changed, 85 insertions(+), 4 deletions(-) create mode 100755 .ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat diff --git a/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat b/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat new file mode 100755 index 000000000..4fb011a32 --- /dev/null +++ b/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat @@ -0,0 +1,3 @@ +..\python_embeded\python.exe .\update.py ..\ComfyUI\ +..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 +pause diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt index a6214e735..c19085320 100755 --- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt +++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt @@ -22,6 +22,6 @@ To update the ComfyUI code: update\update_comfyui.bat -To update ComfyUI with the python dependencies: +To update ComfyUI with the python dependencies, note that you should ONLY run this if you have issues with python dependencies. update\update_comfyui_and_python_dependencies.bat diff --git a/.github/workflows/windows_release_cu118.yml b/.github/workflows/windows_release_cu118.yml index cd0ca9a62..c680d7b87 100644 --- a/.github/workflows/windows_release_cu118.yml +++ b/.github/workflows/windows_release_cu118.yml @@ -7,17 +7,94 @@ on: # - master jobs: - build: + build_dependencies: + env: + # you need at least cuda 5.0 for some of the stuff compiled here. 
+ TORCH_CUDA_ARCH_LIST: "5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6 8.9" + FORCE_CUDA: 1 + MAX_JOBS: 1 # will crash otherwise + DISTUTILS_USE_SDK: 1 # otherwise distutils will complain on windows about multiple versions of msvc + XFORMERS_BUILD_TYPE: "Release" + runs-on: windows-latest + steps: + - name: Cache Built Dependencies + uses: actions/cache@v3 + id: cache-cu118_python_stuff + with: + path: cu118_python_deps.tar + key: ${{ runner.os }}-build-cu118 + + - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }} + uses: actions/checkout@v3 + + - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }} + uses: actions/setup-python@v4 + with: + python-version: '3.10.9' + + - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }} + uses: comfyanonymous/cuda-toolkit@test + id: cuda-toolkit + with: + cuda: '11.8.0' + # copied from xformers github + - name: Setup MSVC + uses: ilammy/msvc-dev-cmd@v1 + - name: Configure Pagefile + # windows runners will OOM with many CUDA architectures + # we cheat here with a page file + uses: al-cheb/configure-pagefile-action@v1.3 + with: + minimum-size: 2GB + # really unfortunate: https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash + - name: Remove link.exe + shell: bash + run: rm /usr/bin/link + + - if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }} + shell: bash + run: | + python -m pip wheel --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir + python -m pip install --no-cache-dir ./temp_wheel_dir/* + echo installed basic + git clone --recurse-submodules https://github.com/facebookresearch/xformers.git + cd xformers + python -m pip install --no-cache-dir wheel setuptools twine + echo building xformers + python setup.py bdist_wheel -d ../temp_wheel_dir/ + cd .. + rm -rf xformers + ls -lah temp_wheel_dir + mv temp_wheel_dir cu118_python_deps + tar cf cu118_python_deps.tar cu118_python_deps + - uses: actions/upload-artifact@v3 + with: + name: cu118_python_deps + path: cu118_python_deps.tar + + + package_comfyui: + needs: build_dependencies permissions: contents: "write" packages: "write" pull-requests: "read" runs-on: windows-latest steps: + - uses: actions/download-artifact@v3 + with: + name: cu118_python_deps + - shell: bash + run: | + mv cu118_python_deps.tar ../ + cd .. + tar xf cu118_python_deps.tar + pwd + ls + - uses: actions/checkout@v3 with: fetch-depth: 0 - - shell: bash run: | cd .. @@ -28,7 +105,7 @@ jobs: echo 'import site' >> ./python310._pth curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py - ./python.exe -s -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 + ./python.exe -s -m pip install ../cu118_python_deps/* sed -i '1i../ComfyUI' ./python310._pth cd .. @@ -41,6 +118,7 @@ jobs: mkdir update cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/update_windows_cu118/* ./update/ cp -r ComfyUI/.ci/windows_base_files/* ./ cd .. From 99d0dc972260c08900b9a56b5f3ccd0790032625 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Mar 2023 11:52:49 -0400 Subject: [PATCH 48/49] Add note to update bat. 
--- .../update_comfyui_and_python_dependencies.bat | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat b/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat index 4fb011a32..1e45075f3 100755 --- a/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat +++ b/.ci/update_windows_cu118/update_comfyui_and_python_dependencies.bat @@ -1,3 +1,4 @@ ..\python_embeded\python.exe .\update.py ..\ComfyUI\ ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2 +echo NOTE If you get an error with pip you can ignore it, it's pip being pip as usual, your ComfyUI should have updated anyways. pause From c34d4e939d74ce5769ca74d10c32f17855e38141 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Mar 2023 17:10:08 -0400 Subject: [PATCH 49/49] Fix VAEEncodeForInpaint modifying source image. --- nodes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nodes.py b/nodes.py index 766be75dc..9a878b441 100644 --- a/nodes.py +++ b/nodes.py @@ -189,6 +189,7 @@ class VAEEncodeForInpaint: y = (pixels.shape[2] // 64) * 64 mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0] + pixels = pixels.clone() if pixels.shape[1] != x or pixels.shape[2] != y: pixels = pixels[:,:x,:y,:] mask = mask[:x,:y]
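
The clone matters because pixels is the caller's tensor and slicing only produces a view, so any in-place edit the node makes afterwards would show up in the source image; cloning first keeps those edits local. A standalone sketch of the behaviour (illustrative, not ComfyUI code):

```python
import torch

def encode_without_clone(pixels):
    region = pixels[:, :2, :2, :]   # slicing returns a view, not a copy
    region *= 0.0                   # in-place edit writes through to the caller's tensor

def encode_with_clone(pixels):
    pixels = pixels.clone()         # defensive copy, as added in the patch
    region = pixels[:, :2, :2, :]
    region *= 0.0                   # edit stays local to the clone

src = torch.ones(1, 4, 4, 3)
encode_without_clone(src)
print(src.sum())                    # 36.0 -- part of the source image was zeroed in place

src = torch.ones(1, 4, 4, 3)
encode_with_clone(src)
print(src.sum())                    # 48.0 -- source image left untouched
```
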