merge master, remove dirty args

ljleb 2023-03-19 18:46:23 -04:00
parent eabd0f7894
commit cae4a7fb06
24 changed files with 773 additions and 167 deletions

View File

@ -1,4 +1,11 @@
@echo off
..\python_embeded\python.exe .\update.py ..\ComfyUI\
..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2
echo NOTE If you get an error with pip you can ignore it, it's pip being pip as usual, your ComfyUI should have updated anyways.
echo
echo This will try to update pytorch and all python dependencies, if you get an error wait for pytorch/xformers to fix their stuff
echo You should not be running this anyways unless you really have to
echo
echo If you just want to update normally, close this and run update_comfyui.bat instead.
echo
pause
..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers -r ../ComfyUI/requirements.txt pygit2
pause

View File

@ -24,15 +24,15 @@ jobs:
path: cu118_python_deps.tar
key: ${{ runner.os }}-build-cu118
- if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
uses: actions/checkout@v3
- if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
uses: actions/setup-python@v4
with:
python-version: '3.10.9'
- if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
uses: comfyanonymous/cuda-toolkit@test
id: cuda-toolkit
with:
@ -51,7 +51,7 @@ jobs:
shell: bash
run: rm /usr/bin/link
- if: ${{ steps.cache-cu118_python_stuff.cache-hit != 'true' }}
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
shell: bash
run: |
python -m pip wheel --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
@ -71,6 +71,7 @@ jobs:
with:
name: cu118_python_deps
path: cu118_python_deps.tar
retention-days: 1
package_comfyui:
@ -124,7 +125,7 @@ jobs:
cd ..
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable
mv ComfyUI_windows_portable.7z ComfyUI/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
cd ComfyUI_windows_portable
python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
@ -135,7 +136,7 @@ jobs:
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
file: new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
tag: "latest"
overwrite: true

View File

@ -0,0 +1,71 @@
name: "Windows Release cu118 dependencies"
on:
workflow_dispatch:
# push:
# branches:
# - master
jobs:
build_dependencies:
env:
# you need at least cuda 5.0 for some of the stuff compiled here.
TORCH_CUDA_ARCH_LIST: "5.0+PTX 6.0 6.1 7.0 7.5 8.0 8.6 8.9"
FORCE_CUDA: 1
MAX_JOBS: 1 # will crash otherwise
DISTUTILS_USE_SDK: 1 # otherwise distutils will complain on windows about multiple versions of msvc
XFORMERS_BUILD_TYPE: "Release"
runs-on: windows-latest
steps:
- name: Cache Built Dependencies
uses: actions/cache@v3
id: cache-cu118_python_stuff
with:
path: cu118_python_deps.tar
key: ${{ runner.os }}-build-cu118
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
uses: actions/checkout@v3
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
uses: actions/setup-python@v4
with:
python-version: '3.10.9'
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
uses: comfyanonymous/cuda-toolkit@test
id: cuda-toolkit
with:
cuda: '11.8.0'
# copied from xformers github
- name: Setup MSVC
uses: ilammy/msvc-dev-cmd@v1
- name: Configure Pagefile
# windows runners will OOM with many CUDA architectures
# we cheat here with a page file
uses: al-cheb/configure-pagefile-action@v1.3
with:
minimum-size: 2GB
# really unfortunate: https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash
- name: Remove link.exe
shell: bash
run: rm /usr/bin/link
- if: steps.cache-cu118_python_stuff.outputs.cache-hit != 'true'
shell: bash
run: |
python -m pip wheel --no-cache-dir torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
python -m pip install --no-cache-dir ./temp_wheel_dir/*
echo installed basic
git clone --recurse-submodules https://github.com/facebookresearch/xformers.git
cd xformers
python -m pip install --no-cache-dir wheel setuptools twine
echo building xformers
python setup.py bdist_wheel -d ../temp_wheel_dir/
cd ..
rm -rf xformers
ls -lah temp_wheel_dir
mv temp_wheel_dir cu118_python_deps
tar cf cu118_python_deps.tar cu118_python_deps

View File

@ -0,0 +1,76 @@
name: "Windows Release cu118 packaging"
on:
workflow_dispatch:
# push:
# branches:
# - master
jobs:
package_comfyui:
permissions:
contents: "write"
packages: "write"
pull-requests: "read"
runs-on: windows-latest
steps:
- uses: actions/cache/restore@v3
id: cache
with:
path: cu118_python_deps.tar
key: ${{ runner.os }}-build-cu118
- shell: bash
run: |
mv cu118_python_deps.tar ../
cd ..
tar xf cu118_python_deps.tar
pwd
ls
- uses: actions/checkout@v3
with:
fetch-depth: 0
- shell: bash
run: |
cd ..
cp -r ComfyUI ComfyUI_copy
curl https://www.python.org/ftp/python/3.10.9/python-3.10.9-embed-amd64.zip -o python_embeded.zip
unzip python_embeded.zip -d python_embeded
cd python_embeded
echo 'import site' >> ./python310._pth
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
./python.exe get-pip.py
./python.exe -s -m pip install ../cu118_python_deps/*
sed -i '1i../ComfyUI' ./python310._pth
cd ..
mkdir ComfyUI_windows_portable
mv python_embeded ComfyUI_windows_portable
mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI
cd ComfyUI_windows_portable
mkdir update
cp -r ComfyUI/.ci/update_windows/* ./update/
cp -r ComfyUI/.ci/update_windows_cu118/* ./update/
cp -r ComfyUI/.ci/windows_base_files/* ./
cd ..
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on ComfyUI_windows_portable.7z ComfyUI_windows_portable
mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
cd ComfyUI_windows_portable
python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu
ls
- name: Upload binaries to release
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: new_ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z
tag: "latest"
overwrite: true

2  .gitignore vendored
View File

@ -5,3 +5,5 @@ models/checkpoints
models/vae
models/embeddings
models/loras
venv/
.idea/

View File

@ -35,7 +35,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git
There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases).
### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_or_cpu.7z)
### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z)
Just download, extract and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints

View File

@ -41,16 +41,16 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
rr = 8
if area[2] != 0:
for t in range(rr):
mult[:,:,area[2]+t:area[2]+1+t,:] *= ((1.0/rr) * (t + 1))
mult[:,:,t:1+t,:] *= ((1.0/rr) * (t + 1))
if (area[0] + area[2]) < x_in.shape[2]:
for t in range(rr):
mult[:,:,area[0] + area[2] - 1 - t:area[0] + area[2] - t,:] *= ((1.0/rr) * (t + 1))
mult[:,:,area[0] - 1 - t:area[0] - t,:] *= ((1.0/rr) * (t + 1))
if area[3] != 0:
for t in range(rr):
mult[:,:,:,area[3]+t:area[3]+1+t] *= ((1.0/rr) * (t + 1))
mult[:,:,:,t:1+t] *= ((1.0/rr) * (t + 1))
if (area[1] + area[3]) < x_in.shape[3]:
for t in range(rr):
mult[:,:,:,area[1] + area[3] - 1 - t:area[1] + area[3] - t] *= ((1.0/rr) * (t + 1))
mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1))
conditionning = {}
conditionning['c_crossattn'] = cond[0]
if cond_concat_in is not None and len(cond_concat_in) > 0:
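A minimal sketch of the feathering that the indexing fix above restores: mult is already sliced to the conditioning area, so the ramp has to be indexed relative to the crop (t) rather than with the absolute offset (area[2] + t). Tensor shape and ramp width are illustrative only.

import torch

rr = 8                                    # ramp width in latent rows
mult = torch.ones(1, 1, 32, 32)           # mask already cropped to the cond area
for t in range(rr):                       # fade the top edge in over rr rows
    mult[:, :, t:t + 1, :] *= (1.0 / rr) * (t + 1)
print(mult[0, 0, :rr, 0])                 # tensor([0.1250, 0.2500, ..., 1.0000])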

View File

@ -527,8 +527,10 @@ def load_controlnet(ckpt_path, model=None):
elif key in controlnet_data:
pass
else:
print("error checkpoint does not contain controlnet data", ckpt_path)
return None
net = load_t2i_adapter(controlnet_data)
if net is None:
print("error checkpoint does not contain controlnet or t2i adapter data", ckpt_path)
return net
context_dim = controlnet_data[key].shape[1]
@ -593,6 +595,9 @@ def load_controlnet(ckpt_path, model=None):
else:
control_model.load_state_dict(controlnet_data, strict=False)
if use_fp16:
control_model = control_model.half()
control = ControlNet(control_model)
return control
@ -682,15 +687,16 @@ class T2IAdapter:
out += self.previous_controlnet.get_control_models()
return out
def load_t2i_adapter(ckpt_path, model=None):
t2i_data = load_torch_file(ckpt_path)
def load_t2i_adapter(t2i_data):
keys = t2i_data.keys()
if "body.0.in_conv.weight" in keys:
cin = t2i_data['body.0.in_conv.weight'].shape[1]
model_ad = adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
else:
elif 'conv_in.weight' in keys:
cin = t2i_data['conv_in.weight'].shape[1]
model_ad = adapter.Adapter(cin=cin, channels=[320, 640, 1280, 1280][:4], nums_rb=2, ksize=1, sk=True, use_conv=False)
else:
return None
model_ad.load_state_dict(t2i_data)
return T2IAdapter(model_ad, cin // 64)
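With load_t2i_adapter now taking a state dict, one loader call covers both checkpoint kinds; a hedged usage sketch (the checkpoint path is an example, comfy.sd is assumed importable from the ComfyUI root):

import comfy.sd

control = comfy.sd.load_controlnet("models/controlnet/t2iadapter_depth_sd14v1.pth")
if control is None:
    # Neither controlnet nor t2i adapter keys were recognized in the checkpoint.
    print("unsupported checkpoint layout")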

View File

@ -168,19 +168,28 @@ def unescape_important(text):
return text
def load_embed(embedding_name, embedding_directory):
embed_path = os.path.join(embedding_directory, embedding_name)
if not os.path.isfile(embed_path):
extensions = ['.safetensors', '.pt', '.bin']
valid_file = None
for x in extensions:
t = embed_path + x
if os.path.isfile(t):
valid_file = t
break
if valid_file is None:
return None
if isinstance(embedding_directory, str):
embedding_directory = [embedding_directory]
valid_file = None
for embed_dir in embedding_directory:
embed_path = os.path.join(embed_dir, embedding_name)
if not os.path.isfile(embed_path):
extensions = ['.safetensors', '.pt', '.bin']
for x in extensions:
t = embed_path + x
if os.path.isfile(t):
valid_file = t
break
else:
embed_path = valid_file
valid_file = embed_path
if valid_file is not None:
break
if valid_file is None:
return None
embed_path = valid_file
if embed_path.lower().endswith(".safetensors"):
import safetensors.torch
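A simplified sketch of the new lookup order in load_embed: each directory in the list is tried in turn, and within each directory the bare name is checked before the known extensions. The helper name and paths below are illustrative, not part of the diff.

import os

def find_embedding(name, directories, extensions=(".safetensors", ".pt", ".bin")):
    for directory in directories:
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate):
            return candidate
        for ext in extensions:
            if os.path.isfile(candidate + ext):
                return candidate + ext
    return None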

View File

@ -2,17 +2,14 @@ import os
from comfy_extras.chainner_models import model_loading
from comfy.sd import load_torch_file
import model_management
from nodes import filter_files_extensions, recursive_search, supported_ckpt_extensions, extract_arg_values
import torch
import comfy.utils
import folder_paths
class UpscaleModelLoader:
models_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "models")
upscale_model_dir = os.path.join(models_dir, "upscale_models")
@classmethod
def INPUT_TYPES(s):
return {"required": { "model_name": (filter_files_extensions(recursive_search(s.upscale_model_dir, *extract_arg_values('--upscaler-dir')), supported_ckpt_extensions), ),
return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"), ),
}}
RETURN_TYPES = ("UPSCALE_MODEL",)
FUNCTION = "load_model"
@ -20,7 +17,7 @@ class UpscaleModelLoader:
CATEGORY = "loaders"
def load_model(self, model_name):
model_path = os.path.join(self.upscale_model_dir, model_name)
model_path = folder_paths.get_full_path("upscale_models", model_name)
sd = load_torch_file(model_path)
out = model_loading.load_state_dict(sd).eval()
return (out, )

View File

@ -0,0 +1,23 @@
#Rename this to extra_model_paths.yaml and ComfyUI will load it
#config for a1111 ui
#all you have to do is change the base_path to where yours is installed
a111:
base_path: path/to/stable-diffusion-webui/
checkpoints: models/Stable-diffusion
configs: models/Stable-diffusion
vae: models/VAE
loras: models/Lora
upscale_models: |
models/ESRGAN
models/SwinIR
embeddings: embeddings
controlnet: models/ControlNet
#other_ui:
# base_path: path/to/ui
# checkpoints: models/checkpoints

69  folder_paths.py Normal file
View File

@ -0,0 +1,69 @@
import os
supported_ckpt_extensions = set(['.ckpt', '.pth'])
supported_pt_extensions = set(['.ckpt', '.pt', '.bin', '.pth'])
try:
import safetensors.torch
supported_ckpt_extensions.add('.safetensors')
supported_pt_extensions.add('.safetensors')
except:
print("Could not import safetensors, safetensors support disabled.")
folder_names_and_paths = {}
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_ckpt_extensions)
folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
folder_names_and_paths["loras"] = ([os.path.join(models_dir, "loras")], supported_pt_extensions)
folder_names_and_paths["vae"] = ([os.path.join(models_dir, "vae")], supported_pt_extensions)
folder_names_and_paths["clip"] = ([os.path.join(models_dir, "clip")], supported_pt_extensions)
folder_names_and_paths["clip_vision"] = ([os.path.join(models_dir, "clip_vision")], supported_pt_extensions)
folder_names_and_paths["style_models"] = ([os.path.join(models_dir, "style_models")], supported_pt_extensions)
folder_names_and_paths["embeddings"] = ([os.path.join(models_dir, "embeddings")], supported_pt_extensions)
folder_names_and_paths["controlnet"] = ([os.path.join(models_dir, "controlnet"), os.path.join(models_dir, "t2i_adapter")], supported_pt_extensions)
folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions)
def add_model_folder_path(folder_name, full_folder_path):
global folder_names_and_paths
if folder_name in folder_names_and_paths:
folder_names_and_paths[folder_name][0].append(full_folder_path)
def get_folder_paths(folder_name):
return folder_names_and_paths[folder_name][0][:]
def recursive_search(directory):
result = []
for root, subdir, file in os.walk(directory, followlinks=True):
for filepath in file:
#we os.path.join directory with a blank string to generate a path separator at the end.
result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
return result
def filter_files_extensions(files, extensions):
return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))
def get_full_path(folder_name, filename):
global folder_names_and_paths
folders = folder_names_and_paths[folder_name]
for x in folders[0]:
full_path = os.path.join(x, filename)
if os.path.isfile(full_path):
return full_path
def get_filename_list(folder_name):
global folder_names_and_paths
output_list = set()
folders = folder_names_and_paths[folder_name]
for x in folders[0]:
output_list.update(filter_files_extensions(recursive_search(x), folders[1]))
return sorted(list(output_list))
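A short usage sketch of the new folder_paths module, assuming it is run from the ComfyUI root; the extra directory and checkpoint name are examples only.

import folder_paths

# Register an extra search location for checkpoints (what extra_model_paths.yaml ends up doing).
folder_paths.add_model_folder_path("checkpoints", "/mnt/models/Stable-diffusion")

# List every checkpoint found across all registered folders, filtered by supported extensions.
print(folder_paths.get_filename_list("checkpoints"))

# Resolve a listed name back to the first registered folder that actually contains the file.
print(folder_paths.get_full_path("checkpoints", "v1-5-pruned-emaonly.safetensors"))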

31  main.py
View File

@ -46,6 +46,8 @@ if __name__ == "__main__":
import execution
import server
import folder_paths
import yaml
def prompt_worker(q, server):
e = execution.PromptExecutor(server)
@ -72,6 +74,26 @@ def cleanup_temp():
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir, ignore_errors=True)
def load_extra_path_config(yaml_path):
with open(yaml_path, 'r') as stream:
config = yaml.safe_load(stream)
for c in config:
conf = config[c]
if conf is None:
continue
base_path = None
if "base_path" in conf:
base_path = conf.pop("base_path")
for x in conf:
for y in conf[x].split("\n"):
if len(y) == 0:
continue
full_path = y
if base_path is not None:
full_path = os.path.join(base_path, full_path)
print("Adding extra search path", x, full_path)
folder_paths.add_model_folder_path(x, full_path)
if __name__ == "__main__":
cleanup_temp()
@ -92,6 +114,15 @@ if __name__ == "__main__":
if '--dont-print-server' in sys.argv:
dont_print = True
extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
if os.path.isfile(extra_model_paths_config_path):
load_extra_path_config(extra_model_paths_config_path)
if '--extra-model-paths-config' in sys.argv:
indices = [(i + 1) for i in range(len(sys.argv) - 1) if sys.argv[i] == '--extra-model-paths-config']
for i in indices:
load_extra_path_config(sys.argv[i])
port = 8188
try:
p_index = sys.argv.index('--port')
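The loop in load_extra_path_config above, replayed against a config shaped like extra_model_paths.yaml.example; a self-contained sketch of the same logic (the base path is invented).

import os
import yaml

config_text = """
a111:
  base_path: /opt/stable-diffusion-webui
  upscale_models: |
    models/ESRGAN
    models/SwinIR
"""
config = yaml.safe_load(config_text)
for name, conf in config.items():
    base_path = conf.pop("base_path", None)
    for folder_name, value in conf.items():
        for line in value.split("\n"):
            if len(line) == 0:
                continue
            full_path = os.path.join(base_path, line) if base_path else line
            print("Adding extra search path", folder_name, full_path)
# Adding extra search path upscale_models /opt/stable-diffusion-webui/models/ESRGAN
# Adding extra search path upscale_models /opt/stable-diffusion-webui/models/SwinIR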

115  nodes.py
View File

@ -23,35 +23,7 @@ import comfy_extras.clip_vision
import model_management
import importlib
supported_ckpt_extensions = ['.ckpt', '.pth']
supported_pt_extensions = ['.ckpt', '.pt', '.bin', '.pth']
try:
import safetensors.torch
supported_ckpt_extensions += ['.safetensors']
supported_pt_extensions += ['.safetensors']
except:
print("Could not import safetensors, safetensors support disabled.")
def extract_arg_values(option):
result = []
for i in range(len(sys.argv) - 1):
if sys.argv[i] == option:
result.append(sys.argv[i + 1])
i += 1
return result
def recursive_search(*directories):
result = []
for directory in directories:
for root, subdir, file in os.walk(directory, followlinks=True):
for filepath in file:
#we os.path.join directory with a blank string to generate a path separator at the end.
result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
return result
def filter_files_extensions(files, extensions):
return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files)))
import folder_paths
def before_node_execution():
model_management.throw_exception_if_processing_interrupted()
@ -198,6 +170,7 @@ class VAEEncodeForInpaint:
y = (pixels.shape[2] // 64) * 64
mask = torch.nn.functional.interpolate(mask[None,None,], size=(pixels.shape[1], pixels.shape[2]), mode="bilinear")[0][0]
pixels = pixels.clone()
if pixels.shape[1] != x or pixels.shape[2] != y:
pixels = pixels[:,:x,:y,:]
mask = mask[:x,:y]
@ -215,32 +188,24 @@ class VAEEncodeForInpaint:
return ({"samples":t, "noise_mask": (mask_erosion[0][:x,:y].round())}, )
class CheckpointLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
config_dir = os.path.join(models_dir, "configs")
ckpt_dir = os.path.join(models_dir, "checkpoints")
embedding_directory = os.path.join(models_dir, "embeddings")
@classmethod
def INPUT_TYPES(s):
return {"required": { "config_name": (filter_files_extensions(recursive_search(s.config_dir), '.yaml'), ),
"ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir, *extract_arg_values('--ckpt-dir')), supported_ckpt_extensions), )}}
return {"required": { "config_name": (folder_paths.get_filename_list("configs"), ),
"ckpt_name": (folder_paths.get_filename_list("checkpoints"), )}}
RETURN_TYPES = ("MODEL", "CLIP", "VAE")
FUNCTION = "load_checkpoint"
CATEGORY = "loaders"
def load_checkpoint(self, config_name, ckpt_name, output_vae=True, output_clip=True):
config_path = os.path.join(self.config_dir, config_name)
ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=self.embedding_directory)
config_path = folder_paths.get_full_path("configs", config_name)
ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
return comfy.sd.load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
class CheckpointLoaderSimple:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
ckpt_dir = os.path.join(models_dir, "checkpoints")
@classmethod
def INPUT_TYPES(s):
return {"required": { "ckpt_name": (filter_files_extensions(recursive_search(s.ckpt_dir, *extract_arg_values('--ckpt-dir')), supported_ckpt_extensions), ),
return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ),
}}
RETURN_TYPES = ("MODEL", "CLIP", "VAE")
FUNCTION = "load_checkpoint"
@ -248,8 +213,8 @@ class CheckpointLoaderSimple:
CATEGORY = "loaders"
def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
ckpt_path = os.path.join(self.ckpt_dir, ckpt_name)
out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=CheckpointLoader.embedding_directory)
ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name)
out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings"))
return out
class CLIPSetLastLayer:
@ -269,13 +234,11 @@ class CLIPSetLastLayer:
return (clip,)
class LoraLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
lora_dir = os.path.join(models_dir, "loras")
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"clip": ("CLIP", ),
"lora_name": (filter_files_extensions(recursive_search(s.lora_dir, *extract_arg_values('--lora-dir')), supported_pt_extensions), ),
"lora_name": (folder_paths.get_filename_list("loras"), ),
"strength_model": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"strength_clip": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
}}
@ -285,16 +248,14 @@ class LoraLoader:
CATEGORY = "loaders"
def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
lora_path = os.path.join(self.lora_dir, lora_name)
lora_path = folder_paths.get_full_path("loras", lora_name)
model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
return (model_lora, clip_lora)
class VAELoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
vae_dir = os.path.join(models_dir, "vae")
@classmethod
def INPUT_TYPES(s):
return {"required": { "vae_name": (filter_files_extensions(recursive_search(s.vae_dir, *extract_arg_values('--vae-dir')), supported_pt_extensions), )}}
return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}}
RETURN_TYPES = ("VAE",)
FUNCTION = "load_vae"
@ -302,16 +263,14 @@ class VAELoader:
#TODO: scale factor?
def load_vae(self, vae_name):
vae_path = os.path.join(self.vae_dir, vae_name)
vae_path = folder_paths.get_full_path("vae", vae_name)
vae = comfy.sd.VAE(ckpt_path=vae_path)
return (vae,)
class ControlNetLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
controlnet_dir = os.path.join(models_dir, "controlnet")
@classmethod
def INPUT_TYPES(s):
return {"required": { "control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir, *extract_arg_values('--controlnet-dir')), supported_pt_extensions), )}}
return {"required": { "control_net_name": (folder_paths.get_filename_list("controlnet"), )}}
RETURN_TYPES = ("CONTROL_NET",)
FUNCTION = "load_controlnet"
@ -319,17 +278,15 @@ class ControlNetLoader:
CATEGORY = "loaders"
def load_controlnet(self, control_net_name):
controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = comfy.sd.load_controlnet(controlnet_path)
return (controlnet,)
class DiffControlNetLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
controlnet_dir = os.path.join(models_dir, "controlnet")
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"control_net_name": (filter_files_extensions(recursive_search(s.controlnet_dir, *extract_arg_values('--controlnet-dir')), supported_pt_extensions), )}}
"control_net_name": (folder_paths.get_filename_list("controlnet"), )}}
RETURN_TYPES = ("CONTROL_NET",)
FUNCTION = "load_controlnet"
@ -337,7 +294,7 @@ class DiffControlNetLoader:
CATEGORY = "loaders"
def load_controlnet(self, model, control_net_name):
controlnet_path = os.path.join(self.controlnet_dir, control_net_name)
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = comfy.sd.load_controlnet(controlnet_path, model)
return (controlnet,)
@ -368,29 +325,10 @@ class ControlNetApply:
c.append(n)
return (c, )
class T2IAdapterLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
t2i_adapter_dir = os.path.join(models_dir, "t2i_adapter")
@classmethod
def INPUT_TYPES(s):
return {"required": { "t2i_adapter_name": (filter_files_extensions(recursive_search(s.t2i_adapter_dir, *extract_arg_values('--t2i-dir')), supported_pt_extensions), )}}
RETURN_TYPES = ("CONTROL_NET",)
FUNCTION = "load_t2i_adapter"
CATEGORY = "loaders"
def load_t2i_adapter(self, t2i_adapter_name):
t2i_path = os.path.join(self.t2i_adapter_dir, t2i_adapter_name)
t2i_adapter = comfy.sd.load_t2i_adapter(t2i_path)
return (t2i_adapter,)
class CLIPLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
clip_dir = os.path.join(models_dir, "clip")
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir, *extract_arg_values('--clip-dir')), supported_pt_extensions), ),
return {"required": { "clip_name": (folder_paths.get_filename_list("clip"), ),
}}
RETURN_TYPES = ("CLIP",)
FUNCTION = "load_clip"
@ -398,16 +336,14 @@ class CLIPLoader:
CATEGORY = "loaders"
def load_clip(self, clip_name):
clip_path = os.path.join(self.clip_dir, clip_name)
clip_path = folder_paths.get_full_path("clip", clip_name)
clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=CheckpointLoader.embedding_directory)
return (clip,)
class CLIPVisionLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
clip_dir = os.path.join(models_dir, "clip_vision")
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name": (filter_files_extensions(recursive_search(s.clip_dir, *extract_arg_values('--clip-vision-dir')), supported_pt_extensions), ),
return {"required": { "clip_name": (folder_paths.get_filename_list("clip_vision"), ),
}}
RETURN_TYPES = ("CLIP_VISION",)
FUNCTION = "load_clip"
@ -415,7 +351,7 @@ class CLIPVisionLoader:
CATEGORY = "loaders"
def load_clip(self, clip_name):
clip_path = os.path.join(self.clip_dir, clip_name)
clip_path = folder_paths.get_full_path("clip_vision", clip_name)
clip_vision = comfy_extras.clip_vision.load(clip_path)
return (clip_vision,)
@ -435,11 +371,9 @@ class CLIPVisionEncode:
return (output,)
class StyleModelLoader:
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
style_model_dir = os.path.join(models_dir, "style_models")
@classmethod
def INPUT_TYPES(s):
return {"required": { "style_model_name": (filter_files_extensions(recursive_search(s.style_model_dir, *extract_arg_values('--style-model-dir')), supported_pt_extensions), )}}
return {"required": { "style_model_name": (folder_paths.get_filename_list("style_models"), )}}
RETURN_TYPES = ("STYLE_MODEL",)
FUNCTION = "load_style_model"
@ -447,7 +381,7 @@ class StyleModelLoader:
CATEGORY = "loaders"
def load_style_model(self, style_model_name):
style_model_path = os.path.join(self.style_model_dir, style_model_name)
style_model_path = folder_paths.get_full_path("style_models", style_model_name)
style_model = comfy.sd.load_style_model(style_model_path)
return (style_model,)
@ -989,7 +923,6 @@ NODE_CLASS_MAPPINGS = {
"ControlNetApply": ControlNetApply,
"ControlNetLoader": ControlNetLoader,
"DiffControlNetLoader": DiffControlNetLoader,
"T2IAdapterLoader": T2IAdapterLoader,
"StyleModelLoader": StyleModelLoader,
"CLIPVisionLoader": CLIPVisionLoader,
"VAEDecodeTiled": VAEDecodeTiled,

View File

@ -81,13 +81,13 @@
"\n",
"\n",
"# T2I-Adapter\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth -P ./models/t2i_adapter/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth -P ./models/t2i_adapter/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth -P ./models/t2i_adapter/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth -P ./models/t2i_adapter/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth -P ./models/t2i_adapter/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth -P ./models/t2i_adapter/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth -P ./models/t2i_adapter/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth -P ./models/controlnet/\n",
"\n",
"# T2I Styles Model\n",
"#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_style_sd14v1.pth -P ./models/style_models/\n",
@ -122,7 +122,6 @@
"source": [
"### Run ComfyUI with localtunnel (Recommended Way)\n",
"\n",
"use the **fp16** model configs for more speed\n",
"\n"
],
"metadata": {
@ -166,7 +165,6 @@
"cell_type": "markdown",
"source": [
"### Run ComfyUI with colab iframe (use only in case the previous way with localtunnel doesn't work)\n",
"use the **fp16** model configs for more speed\n",
"\n",
"You should see the ui appear in an iframe. If you get a 403 error, it's your firefox settings or an extension that's messing things up.\n",
"\n",

View File

@ -2,6 +2,7 @@ import os
import sys
import asyncio
import nodes
import folder_paths
import execution
import uuid
import json
@ -73,6 +74,11 @@ class PromptServer():
async def get_root(request):
return web.FileResponse(os.path.join(self.web_root, "index.html"))
@routes.get("/embeddings")
def get_embeddings(self):
embeddings = folder_paths.get_filename_list("embeddings")
return web.json_response(list(map(lambda a: os.path.splitext(a)[0].lower(), embeddings)))
@routes.get("/extensions")
async def get_extensions(request):
files = glob.glob(os.path.join(self.web_root, 'extensions/**/*.js'), recursive=True)
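What the new /embeddings route hands to the frontend: filenames from the embeddings folders with the extension stripped and lower-cased. A tiny sketch with made-up filenames:

import os

embeddings = ["EasyNegative.safetensors", "bad-hands-5.pt"]
print([os.path.splitext(name)[0].lower() for name in embeddings])
# ['easynegative', 'bad-hands-5']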

View File

@ -14,11 +14,111 @@ app.registerExtension({
this.addInput("", "*");
this.addOutput(this.properties.showOutputText ? "*" : "", "*");
this.onConnectInput = function (_, type) {
if (type !== this.outputs[0].type) {
this.removeOutput(0);
this.addOutput(this.properties.showOutputText ? type : "", type);
this.size = this.computeSize();
this.onConnectionsChange = function (type, index, connected, link_info) {
// Prevent multiple connections to different types when we have no input
if (connected && type === LiteGraph.OUTPUT) {
// Ignore wildcard nodes as these will be updated to real types
const types = new Set(this.outputs[0].links.map((l) => app.graph.links[l].type).filter((t) => t !== "*"));
if (types.size > 1) {
for (let i = 0; i < this.outputs[0].links.length - 1; i++) {
const linkId = this.outputs[0].links[i];
const link = app.graph.links[linkId];
const node = app.graph.getNodeById(link.target_id);
node.disconnectInput(link.target_slot);
}
}
}
// Find root input
let currentNode = this;
let updateNodes = [];
let inputType = null;
let inputNode = null;
while (currentNode) {
updateNodes.unshift(currentNode);
const linkId = currentNode.inputs[0].link;
if (linkId !== null) {
const link = app.graph.links[linkId];
const node = app.graph.getNodeById(link.origin_id);
const type = node.constructor.type;
if (type === "Reroute") {
// Move the previous node
currentNode = node;
} else {
// We've found the end
inputNode = currentNode;
inputType = node.outputs[link.origin_slot].type;
break;
}
} else {
// This path has no input node
currentNode = null;
break;
}
}
// Find all outputs
const nodes = [this];
let outputType = null;
while (nodes.length) {
currentNode = nodes.pop();
const outputs = (currentNode.outputs ? currentNode.outputs[0].links : []) || [];
if (outputs.length) {
for (const linkId of outputs) {
const link = app.graph.links[linkId];
// When disconnecting sometimes the link is still registered
if (!link) continue;
const node = app.graph.getNodeById(link.target_id);
const type = node.constructor.type;
if (type === "Reroute") {
// Follow reroute nodes
nodes.push(node);
updateNodes.push(node);
} else {
// We've found an output
const nodeOutType = node.inputs[link.target_slot].type;
if (inputType && nodeOutType !== inputType) {
// The output doesnt match our input so disconnect it
node.disconnectInput(link.target_slot);
} else {
outputType = nodeOutType;
}
}
}
} else {
// No more outputs for this path
}
}
const displayType = inputType || outputType || "*";
const color = LGraphCanvas.link_type_colors[displayType];
// Update the types of each node
for (const node of updateNodes) {
// If we dont have an input type we are always wildcard but we'll show the output type
// This lets you change the output link to a different type and all nodes will update
node.outputs[0].type = inputType || "*";
node.__outputType = displayType;
node.outputs[0].name = node.properties.showOutputText ? displayType : "";
node.size = node.computeSize();
for (const l of node.outputs[0].links || []) {
const link = app.graph.links[l];
if (link) {
link.color = color;
}
}
}
if (inputNode) {
const link = app.graph.links[inputNode.inputs[0].link];
if (link) {
link.color = color;
}
}
};
@ -41,12 +141,12 @@ app.registerExtension({
callback: () => {
this.properties.showOutputText = !this.properties.showOutputText;
if (this.properties.showOutputText) {
this.outputs[0].name = this.outputs[0].type;
this.outputs[0].name = this.__outputType || this.outputs[0].type;
} else {
this.outputs[0].name = "";
}
this.size = this.computeSize();
app.graph.setDirtyCanvas(true);
app.graph.setDirtyCanvas(true, true);
},
},
{
@ -61,8 +161,8 @@ app.registerExtension({
computeSize() {
return [
this.properties.showOutputText && this.outputs && this.outputs.length
? Math.max(55, LiteGraph.NODE_TEXT_SIZE * this.outputs[0].name.length * 0.6 + 40)
: 55,
? Math.max(75, LiteGraph.NODE_TEXT_SIZE * this.outputs[0].name.length * 0.6 + 40)
: 75,
26,
];
}
@ -85,6 +185,7 @@ app.registerExtension({
Object.assign(RerouteNode, {
title_mode: LiteGraph.NO_TITLE,
title: "Reroute",
collapsable: false,
})
);

View File

@ -106,6 +106,15 @@ class ComfyApi extends EventTarget {
return await resp.json();
}
/**
* Gets a list of embedding names
* @returns An array of embedding names
*/
async getEmbeddings() {
const resp = await fetch("/embeddings", { cache: "no-store" });
return await resp.json();
}
/**
* Loads node object definitions for the graph
* @returns The node definitions

View File

@ -2,7 +2,7 @@ import { ComfyWidgets } from "./widgets.js";
import { ComfyUI } from "./ui.js";
import { api } from "./api.js";
import { defaultGraph } from "./defaultGraph.js";
import { getPngMetadata } from "./pnginfo.js";
import { getPngMetadata, importA1111 } from "./pnginfo.js";
class ComfyApp {
constructor() {
@ -614,6 +614,12 @@ class ComfyApp {
if (!graphData) {
graphData = defaultGraph;
}
// Patch T2IAdapterLoader to ControlNetLoader since they are the same node now
for (let n of graphData.nodes) {
if (n.type == "T2IAdapterLoader") n.type = "ControlNetLoader";
}
this.graph.configure(graphData);
for (const node of this.graph._nodes) {
@ -672,24 +678,10 @@ class ComfyApp {
for (let i in node.inputs) {
let parent = node.getInputNode(i);
if (parent) {
let link;
if (parent.isVirtualNode) {
// Follow the path of virtual nodes until we reach the first real one
while (parent != null) {
link = parent.getInputLink(0);
if (link) {
const from = graph.getNodeById(link.origin_id);
if (from.isVirtualNode) {
parent = from;
} else {
parent = null;
}
} else {
parent = null;
}
}
} else {
link = node.getInputLink(i);
let link = node.getInputLink(i);
while (parent && parent.isVirtualNode) {
link = parent.getInputLink(link.origin_slot);
parent = parent.getInputNode(link.origin_slot);
}
if (link) {
@ -743,8 +735,12 @@ class ComfyApp {
async handleFile(file) {
if (file.type === "image/png") {
const pngInfo = await getPngMetadata(file);
if (pngInfo && pngInfo.workflow) {
this.loadGraphData(JSON.parse(pngInfo.workflow));
if (pngInfo) {
if (pngInfo.workflow) {
this.loadGraphData(JSON.parse(pngInfo.workflow));
} else if (pngInfo.parameters) {
importA1111(this.graph, pngInfo.parameters);
}
}
} else if (file.type === "application/json" || file.name.endsWith(".json")) {
const reader = new FileReader();

View File

@ -1,3 +1,5 @@
import { api } from "./api.js";
export function getPngMetadata(file) {
return new Promise((r) => {
const reader = new FileReader();
@ -43,3 +45,262 @@ export function getPngMetadata(file) {
reader.readAsArrayBuffer(file);
});
}
export async function importA1111(graph, parameters) {
const p = parameters.lastIndexOf("\nSteps:");
if (p > -1) {
const embeddings = await api.getEmbeddings();
const opts = parameters
.substr(p)
.split(",")
.reduce((p, n) => {
const s = n.split(":");
p[s[0].trim().toLowerCase()] = s[1].trim();
return p;
}, {});
const p2 = parameters.lastIndexOf("\nNegative prompt:", p);
if (p2 > -1) {
let positive = parameters.substr(0, p2).trim();
let negative = parameters.substring(p2 + 18, p).trim();
const ckptNode = LiteGraph.createNode("CheckpointLoaderSimple");
const clipSkipNode = LiteGraph.createNode("CLIPSetLastLayer");
const positiveNode = LiteGraph.createNode("CLIPTextEncode");
const negativeNode = LiteGraph.createNode("CLIPTextEncode");
const samplerNode = LiteGraph.createNode("KSampler");
const imageNode = LiteGraph.createNode("EmptyLatentImage");
const vaeNode = LiteGraph.createNode("VAEDecode");
const vaeLoaderNode = LiteGraph.createNode("VAELoader");
const saveNode = LiteGraph.createNode("SaveImage");
let hrSamplerNode = null;
const ceil64 = (v) => Math.ceil(v / 64) * 64;
function getWidget(node, name) {
return node.widgets.find((w) => w.name === name);
}
function setWidgetValue(node, name, value, isOptionPrefix) {
const w = getWidget(node, name);
if (isOptionPrefix) {
const o = w.options.values.find((w) => w.startsWith(value));
if (o) {
w.value = o;
} else {
console.warn(`Unknown value '${value}' for widget '${name}'`, node);
w.value = value;
}
} else {
w.value = value;
}
}
function createLoraNodes(clipNode, text, prevClip, prevModel) {
const loras = [];
text = text.replace(/<lora:([^:]+:[^>]+)>/g, function (m, c) {
const s = c.split(":");
const weight = parseFloat(s[1]);
if (isNaN(weight)) {
console.warn("Invalid LORA", m);
} else {
loras.push({ name: s[0], weight });
}
return "";
});
for (const l of loras) {
const loraNode = LiteGraph.createNode("LoraLoader");
graph.add(loraNode);
setWidgetValue(loraNode, "lora_name", l.name, true);
setWidgetValue(loraNode, "strength_model", l.weight);
setWidgetValue(loraNode, "strength_clip", l.weight);
prevModel.node.connect(prevModel.index, loraNode, 0);
prevClip.node.connect(prevClip.index, loraNode, 1);
prevModel = { node: loraNode, index: 0 };
prevClip = { node: loraNode, index: 1 };
}
prevClip.node.connect(1, clipNode, 0);
prevModel.node.connect(0, samplerNode, 0);
if (hrSamplerNode) {
prevModel.node.connect(0, hrSamplerNode, 0);
}
return { text, prevModel, prevClip };
}
function replaceEmbeddings(text) {
return text.replaceAll(
new RegExp(
"\\b(" + embeddings.map((e) => e.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")).join("\\b|\\b") + ")\\b",
"ig"
),
"embedding:$1"
);
}
function popOpt(name) {
const v = opts[name];
delete opts[name];
return v;
}
graph.clear();
graph.add(ckptNode);
graph.add(clipSkipNode);
graph.add(positiveNode);
graph.add(negativeNode);
graph.add(samplerNode);
graph.add(imageNode);
graph.add(vaeNode);
graph.add(vaeLoaderNode);
graph.add(saveNode);
ckptNode.connect(1, clipSkipNode, 0);
clipSkipNode.connect(0, positiveNode, 0);
clipSkipNode.connect(0, negativeNode, 0);
ckptNode.connect(0, samplerNode, 0);
positiveNode.connect(0, samplerNode, 1);
negativeNode.connect(0, samplerNode, 2);
imageNode.connect(0, samplerNode, 3);
vaeNode.connect(0, saveNode, 0);
samplerNode.connect(0, vaeNode, 0);
vaeLoaderNode.connect(0, vaeNode, 1);
const handlers = {
model(v) {
setWidgetValue(ckptNode, "ckpt_name", v, true);
},
"cfg scale"(v) {
setWidgetValue(samplerNode, "cfg", +v);
},
"clip skip"(v) {
setWidgetValue(clipSkipNode, "stop_at_clip_layer", -v);
},
sampler(v) {
let name = v.toLowerCase().replace("++", "pp").replaceAll(" ", "_");
if (name.includes("karras")) {
name = name.replace("karras", "").replace(/_+$/, "");
setWidgetValue(samplerNode, "scheduler", "karras");
} else {
setWidgetValue(samplerNode, "scheduler", "normal");
}
const w = getWidget(samplerNode, "sampler_name");
const o = w.options.values.find((w) => w === name || w === "sample_" + name);
if (o) {
setWidgetValue(samplerNode, "sampler_name", o);
}
},
size(v) {
const wxh = v.split("x");
const w = ceil64(+wxh[0]);
const h = ceil64(+wxh[1]);
const hrUp = popOpt("hires upscale");
const hrSz = popOpt("hires resize");
let hrMethod = popOpt("hires upscaler");
setWidgetValue(imageNode, "width", w);
setWidgetValue(imageNode, "height", h);
if (hrUp || hrSz) {
let uw, uh;
if (hrUp) {
uw = w * hrUp;
uh = h * hrUp;
} else {
const s = hrSz.split("x");
uw = +s[0];
uh = +s[1];
}
let upscaleNode;
let latentNode;
if (hrMethod.startsWith("Latent")) {
latentNode = upscaleNode = LiteGraph.createNode("LatentUpscale");
graph.add(upscaleNode);
samplerNode.connect(0, upscaleNode, 0);
switch (hrMethod) {
case "Latent (nearest-exact)":
hrMethod = "nearest-exact";
break;
}
setWidgetValue(upscaleNode, "upscale_method", hrMethod, true);
} else {
const decode = LiteGraph.createNode("VAEDecodeTiled");
graph.add(decode);
samplerNode.connect(0, decode, 0);
vaeLoaderNode.connect(0, decode, 1);
const upscaleLoaderNode = LiteGraph.createNode("UpscaleModelLoader");
graph.add(upscaleLoaderNode);
setWidgetValue(upscaleLoaderNode, "model_name", hrMethod, true);
const modelUpscaleNode = LiteGraph.createNode("ImageUpscaleWithModel");
graph.add(modelUpscaleNode);
decode.connect(0, modelUpscaleNode, 1);
upscaleLoaderNode.connect(0, modelUpscaleNode, 0);
upscaleNode = LiteGraph.createNode("ImageScale");
graph.add(upscaleNode);
modelUpscaleNode.connect(0, upscaleNode, 0);
const vaeEncodeNode = (latentNode = LiteGraph.createNode("VAEEncodeTiled"));
graph.add(vaeEncodeNode);
upscaleNode.connect(0, vaeEncodeNode, 0);
vaeLoaderNode.connect(0, vaeEncodeNode, 1);
}
setWidgetValue(upscaleNode, "width", ceil64(uw));
setWidgetValue(upscaleNode, "height", ceil64(uh));
hrSamplerNode = LiteGraph.createNode("KSampler");
graph.add(hrSamplerNode);
ckptNode.connect(0, hrSamplerNode, 0);
positiveNode.connect(0, hrSamplerNode, 1);
negativeNode.connect(0, hrSamplerNode, 2);
latentNode.connect(0, hrSamplerNode, 3);
hrSamplerNode.connect(0, vaeNode, 0);
}
},
steps(v) {
setWidgetValue(samplerNode, "steps", +v);
},
seed(v) {
setWidgetValue(samplerNode, "seed", +v);
},
};
for (const opt in opts) {
if (opt in handlers) {
handlers[opt](popOpt(opt));
}
}
if (hrSamplerNode) {
setWidgetValue(hrSamplerNode, "steps", getWidget(samplerNode, "steps").value);
setWidgetValue(hrSamplerNode, "cfg", getWidget(samplerNode, "cfg").value);
setWidgetValue(hrSamplerNode, "scheduler", getWidget(samplerNode, "scheduler").value);
setWidgetValue(hrSamplerNode, "sampler_name", getWidget(samplerNode, "sampler_name").value);
setWidgetValue(hrSamplerNode, "denoise", +(popOpt("denoising strength") || "1"));
}
let n = createLoraNodes(positiveNode, positive, { node: clipSkipNode, index: 0 }, { node: ckptNode, index: 0 });
positive = n.text;
n = createLoraNodes(negativeNode, negative, n.prevClip, n.prevModel);
negative = n.text;
setWidgetValue(positiveNode, "text", replaceEmbeddings(positive));
setWidgetValue(negativeNode, "text", replaceEmbeddings(negative));
graph.arrange();
for (const opt of ["model hash", "ensd"]) {
delete opts[opt];
}
console.warn("Unhandled parameters:", opts);
}
}
}
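For reference, a Python re-implementation of how importA1111 splits an A1111 "parameters" string into positive prompt, negative prompt, and generation options before it builds the graph; the sample text is invented and node creation is omitted.

parameters = (
    "a painting of a fox, masterpiece\n"
    "Negative prompt: blurry, lowres\n"
    "Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 42, Size: 512x768"
)
p = parameters.rfind("\nSteps:")
opts = {}
for item in parameters[p:].split(","):
    key, value = item.split(":")
    opts[key.strip().lower()] = value.strip()
p2 = parameters.rfind("\nNegative prompt:", 0, p)
positive = parameters[:p2].strip()
negative = parameters[p2 + len("\nNegative prompt:"):p].strip()
print(positive)   # a painting of a fox, masterpiece
print(negative)   # blurry, lowres
print(opts)       # {'steps': '20', 'sampler': 'euler a', 'cfg scale': '7', 'seed': '42', 'size': '512x768'}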

View File

@ -232,6 +232,7 @@ export class ComfyUI {
this.settings = new ComfySettingsDialog();
this.batchCount = 1;
this.lastQueueSize = 0;
this.queue = new ComfyList("Queue");
this.history = new ComfyList("History");
@ -262,6 +263,7 @@ export class ComfyUI {
onchange: (i) => {
document.getElementById('extraOptions').style.display = i.srcElement.checked ? "block" : "none";
this.batchCount = i.srcElement.checked ? document.getElementById('batchCountInputRange').value : 1;
document.getElementById('autoQueueCheckbox').checked = false;
}
})
])
@ -280,6 +282,8 @@ export class ComfyUI {
document.getElementById('batchCountInputNumber').value = i.srcElement.value;
}
}),
$el("input", { id: "autoQueueCheckbox", type: "checkbox", checked: false, title: "automatically queue prompt when the queue size hits 0",
})
]),
]),
$el("div.comfy-menu-btns", [
@ -332,5 +336,11 @@ export class ComfyUI {
setStatus(status) {
this.queueSize.textContent = "Queue size: " + (status ? status.exec_info.queue_remaining : "ERR");
if (status) {
if (this.lastQueueSize != 0 && status.exec_info.queue_remaining == 0 && document.getElementById('autoQueueCheckbox').checked) {
app.queuePrompt(0, this.batchCount);
}
this.lastQueueSize = status.exec_info.queue_remaining
}
}
}

View File

@ -56,7 +56,7 @@ function addMultilineWidget(node, name, defaultVal, app) {
widget.inputEl = document.createElement("textarea");
widget.inputEl.className = "comfy-multiline-input";
widget.inputEl.value = defaultVal;
document.addEventListener("click", function (event) {
document.addEventListener("mousedown", function (event) {
if (!widget.inputEl.contains(event.target)) {
widget.inputEl.blur();
}