From 5bcbc007ddeb5a784925c6c2c457ef0e2a9354b7 Mon Sep 17 00:00:00 2001 From: City <125218114+city96@users.noreply.github.com> Date: Thu, 30 Mar 2023 23:13:58 +0200 Subject: [PATCH 01/42] Add human-readable names for nodes --- nodes.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++ server.py | 3 ++- web/scripts/app.js | 2 +- 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index 6fb7f0175..538787555 100644 --- a/nodes.py +++ b/nodes.py @@ -1018,6 +1018,54 @@ NODE_CLASS_MAPPINGS = { "VAEEncodeTiled": VAEEncodeTiled, } +NODE_DISPLAY_NAME_MAPPINGS = { + # Sampling + "KSampler": "KSampler", + "KSamplerAdvanced": "KSampler (Advanced)", + # Loaders + "CheckpointLoader": "Load Checkpoint", + "CheckpointLoaderSimple": "Load Checkpoint (Simple)", + "VAELoader": "Load VAE", + "LoraLoader": "Load LoRA", + "CLIPLoader": "Load CLIP", + "ControlNetLoader": "Load ControlNet Model", + "DiffControlNetLoader": "Load ControlNet Model (diff)", + "StyleModelLoader": "Load Style Model", + "CLIPVisionLoader": "Load CLIP Vision", + "UpscaleModelLoader": "Load Upscale Model", + # Conditioning + "CLIPVisionEncode": "CLIP Vision Encode", + "StyleModelApply": "Apply Style Model", + "CLIPTextEncode": "CLIP Text Encode (Prompt)", + "CLIPSetLastLayer": "CLIP Set Last Layer", + "ConditioningCombine": "Conditioning (Combine)", + "ConditioningSetArea": "Conditioning (Set Area)", + "ControlNetApply": "Apply ControlNet", + # Latent + "VAEEncodeForInpaint": "VAE Encode (for Inpainting)", + "SetLatentNoiseMask": "Set Latent Noise Mask", + "VAEDecode": "VAE Decode", + "VAEEncode": "VAE Encode", + "LatentRotate": "Rotate Latent", + "LatentFlip": "Flip Latent", + "LatentCrop": "Crop Latent", + "EmptyLatentImage": "Empty Latent Image", + "LatentUpscale": "Upscale Latent", + "LatentComposite": "Latent Composite", + # Image + "SaveImage": "Save Image", + "PreviewImage": "Preview Image", + "LoadImage": "Load Image", + "LoadImageMask": "Load Image (as Mask)", + "ImageScale": "Upscale Image", + "ImageUpscaleWithModel": "Upscale Image (using Model)", + "ImageInvert": "Invert Image", + "ImagePadForOutpaint": "Pad Image for Outpainting", + # _for_testing + "VAEDecodeTiled": "VAE Decode (Tiled)", + "VAEEncodeTiled": "VAE Encode (Tiled)", +} + def load_custom_node(module_path): module_name = os.path.basename(module_path) if os.path.isfile(module_path): diff --git a/server.py b/server.py index 80fb2dc72..2e6fc1a28 100644 --- a/server.py +++ b/server.py @@ -153,7 +153,8 @@ class PromptServer(): info['input'] = obj_class.INPUT_TYPES() info['output'] = obj_class.RETURN_TYPES info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output'] - info['name'] = x #TODO + info['name'] = x + info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[x] if x in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else x info['description'] = '' info['category'] = 'sd' if hasattr(obj_class, 'CATEGORY'): diff --git a/web/scripts/app.js b/web/scripts/app.js index b29981091..125ce17ae 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -777,7 +777,7 @@ class ComfyApp { app.#invokeExtensionsAsync("nodeCreated", this); }, { - title: nodeData.name, + title: nodeData.display_name, comfyClass: nodeData.name, } ); From e6e30ee7cbf88c731b34f8ef52832920567940b6 Mon Sep 17 00:00:00 2001 From: City <125218114+city96@users.noreply.github.com> Date: Fri, 31 Mar 2023 00:54:35 +0200 Subject: [PATCH 02/42] Fallback for node title --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
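A note on the lookup style used in server.py above: the expression "NODE_DISPLAY_NAME_MAPPINGS[x] if x in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else x" is the membership-test form of a dict lookup with a default, which dict.get expresses directly; the "display_name || name" fallback added to app.js in the hunk below mirrors the same pattern on the client. A minimal standalone sketch of the idiom, where the one-entry mapping is a stand-in for the real table:

    # Stand-in for nodes.NODE_DISPLAY_NAME_MAPPINGS.
    NODE_DISPLAY_NAME_MAPPINGS = {"KSamplerAdvanced": "KSampler (Advanced)"}

    def display_name(class_name):
        # dict.get(key, default) collapses the membership test and the index
        # into one call and skips the redundant .keys() scan.
        return NODE_DISPLAY_NAME_MAPPINGS.get(class_name, class_name)

    assert display_name("KSamplerAdvanced") == "KSampler (Advanced)"
    assert display_name("UnknownNode") == "UnknownNode"  # falls back to the class name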
diff --git a/web/scripts/app.js b/web/scripts/app.js index 125ce17ae..404d87cb6 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -777,7 +777,7 @@ class ComfyApp { app.#invokeExtensionsAsync("nodeCreated", this); }, { - title: nodeData.display_name, + title: nodeData.display_name || nodeData.name, comfyClass: nodeData.name, } ); From 9ac95e6ac7a16283ab5f0710fdda15be3b5a5e50 Mon Sep 17 00:00:00 2001 From: City <125218114+city96@users.noreply.github.com> Date: Fri, 31 Mar 2023 07:05:17 +0200 Subject: [PATCH 03/42] Add human-readable name support for custom nodes --- custom_nodes/example_node.py.example | 5 +++++ nodes.py | 2 ++ 2 files changed, 7 insertions(+) diff --git a/custom_nodes/example_node.py.example b/custom_nodes/example_node.py.example index 1bb1a5a37..d2f6e3332 100644 --- a/custom_nodes/example_node.py.example +++ b/custom_nodes/example_node.py.example @@ -84,3 +84,8 @@ class Example: NODE_CLASS_MAPPINGS = { "Example": Example } + +# A dictionary that contains the friendly/humanly readable titles for the nodes +NODE_DISPLAY_NAME_MAPPINGS = { + "Example": "Example Node" +} diff --git a/nodes.py b/nodes.py index 538787555..da71dfc04 100644 --- a/nodes.py +++ b/nodes.py @@ -1081,6 +1081,8 @@ def load_custom_node(module_path): module_spec.loader.exec_module(module) if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None: NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS) + if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None: + NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS) else: print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.") except Exception as e: From 37713e3b0acfc576f4eafc0b47582374ab5987dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Wed, 5 Apr 2023 21:22:14 +0800 Subject: [PATCH 04/42] Add basic XPU device support closed #387 --- comfy/model_management.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 052dfb775..f0b8be55e 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -5,6 +5,7 @@ LOW_VRAM = 2 NORMAL_VRAM = 3 HIGH_VRAM = 4 MPS = 5 +XPU = 6 accelerate_enabled = False vram_state = NORMAL_VRAM @@ -85,10 +86,17 @@ try: except: pass +try: + import intel_extension_for_pytorch + if torch.xpu.is_available(): + vram_state = XPU +except: + pass + if forced_cpu: vram_state = CPU -print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state]) +print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS", "XPU"][vram_state]) current_loaded_model = None @@ -141,6 +149,9 @@ def load_model_gpu(model): mps_device = torch.device("mps") real_model.to(mps_device) pass + elif vram_state == XPU: + real_model.to("xpu") + pass elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM: model_accelerated = False real_model.cuda() @@ -189,6 +200,8 @@ def unload_if_low_vram(model): def get_torch_device(): if vram_state == MPS: return torch.device("mps") + if vram_state == XPU: + return torch.device("xpu") if vram_state == CPU: return torch.device("cpu") else: @@ -228,6 +241,9 @@ def get_free_memory(dev=None, torch_free_too=False): if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'): mem_free_total = psutil.virtual_memory().available mem_free_torch 
= mem_free_total + elif hasattr(dev, 'type') and (dev.type == 'xpu'): + mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) + mem_free_torch = mem_free_total else: stats = torch.cuda.memory_stats(dev) mem_active = stats['active_bytes.all.current'] @@ -258,8 +274,12 @@ def mps_mode(): global vram_state return vram_state == MPS +def xpu_mode(): + global vram_state + return vram_state == XPU + def should_use_fp16(): - if cpu_mode() or mps_mode(): + if cpu_mode() or mps_mode() or xpu_mode(): return False #TODO ? if torch.cuda.is_bf16_supported(): From 1ced2bdd2da9a13caf72d7bff36d7f645f443fc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Wed, 5 Apr 2023 21:25:37 +0800 Subject: [PATCH 05/42] Specify safetensors version to avoid upstream errors https://github.com/huggingface/safetensors/issues/142 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3b4040a29..0527b31df 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ torchsde einops open-clip-torch transformers>=4.25.1 -safetensors +safetensors>=0.3.0 pytorch_lightning aiohttp accelerate From dd29966f8a2973529ea50de2ef3d0e7c72b5114e Mon Sep 17 00:00:00 2001 From: EllangoK Date: Wed, 5 Apr 2023 20:32:59 -0400 Subject: [PATCH 06/42] changes main.py to use argparse --- main.py | 118 ++++++++++++++++++++++---------------------------------- 1 file changed, 47 insertions(+), 71 deletions(-) diff --git a/main.py b/main.py index a3549b86f..20c8a49e8 100644 --- a/main.py +++ b/main.py @@ -1,57 +1,54 @@ -import os -import sys -import shutil - -import threading +import argparse import asyncio +import os +import shutil +import sys +import threading if os.name == "nt": import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": - if '--help' in sys.argv: - print() - print("Valid Command line Arguments:") - print("\t--listen [ip]\t\t\tListen on ip or 0.0.0.0 if none given so the UI can be accessed from other computers.") - print("\t--port 8188\t\t\tSet the listen port.") - print() - print("\t--extra-model-paths-config file.yaml\tload an extra_model_paths.yaml file.") - print("\t--output-directory path/to/output\tSet the ComfyUI output directory.") - print() - print() - print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n") - print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.") - print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.") - print("\t--disable-xformers\t\tdisables xformers") - print("\t--cuda-device 1\t\tSet the id of the cuda device this instance will use.") - print() - print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n") - print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.") - print("\t--lowvram\t\t\tSplit the unet in parts to use less vram.") - print("\t--novram\t\t\tWhen lowvram isn't enough.") - print() - print("\t--cpu\t\t\tTo use the CPU for everything (slow).") - exit() + parser = argparse.ArgumentParser(description="Script Arguments") - if '--dont-upcast-attention' in sys.argv: + 
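    # Each add_argument() call below registers one flag on the parser; argparse
    # exposes parsed flags as namespace attributes with dashes converted to
    # underscores, so "--dont-upcast-attention" is read back as
    # args.dont_upcast_attention. action="store_true" flags default to False
    # and flip to True when present, e.g.
    #     args = parser.parse_args(["--lowvram", "--port", "9000"])
    # yields args.lowvram == True and args.port == 9000.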
parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 0.0.0.0 if none given so the UI can be accessed from other computers.") + parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") + parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") + parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") + parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") + parser.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.") + parser.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") + parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") + parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") + parser.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.") + parser.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.") + parser.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.") + parser.add_argument("--novram", action="store_true", help="When lowvram isn't enough.") + parser.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).") + parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") + parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") + parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.") + + args = parser.parse_args() + + if args.dont_upcast_attention: print("disabling upcasting of attention") os.environ['ATTN_PRECISION'] = "fp16" - try: - index = sys.argv.index('--cuda-device') - device = sys.argv[index + 1] - os.environ['CUDA_VISIBLE_DEVICES'] = device - print("Set cuda device to:", device) - except: - pass + if args.cuda_device is not None: + os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) + print("Set cuda device to:", args.cuda_device) + -from nodes import init_custom_nodes -import execution -import server -import folder_paths import yaml +import execution +import folder_paths +import server +from nodes import init_custom_nodes + + def prompt_worker(q, server): e = execution.PromptExecutor(server) while True: @@ -110,51 +107,30 @@ if __name__ == "__main__": hijack_progress(server) threading.Thread(target=prompt_worker, daemon=True, args=(q,server,)).start() - try: - address = '0.0.0.0' - p_index = sys.argv.index('--listen') - try: - ip = sys.argv[p_index + 1] - if ip[:2] != '--': - address = ip - except: - pass - except: - address = '127.0.0.1' - dont_print = False - if '--dont-print-server' in sys.argv: - dont_print = True + address = args.listen + + dont_print = args.dont_print_server extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml") if os.path.isfile(extra_model_paths_config_path): load_extra_path_config(extra_model_paths_config_path) - if 
'--extra-model-paths-config' in sys.argv: - indices = [(i + 1) for i in range(len(sys.argv) - 1) if sys.argv[i] == '--extra-model-paths-config'] - for i in indices: - load_extra_path_config(sys.argv[i]) + if args.extra_model_paths_config: + load_extra_path_config(args.extra_model_paths_config) - try: - output_dir = sys.argv[sys.argv.index('--output-directory') + 1] - output_dir = os.path.abspath(output_dir) + if args.output_directory: + output_dir = os.path.abspath(args.output_directory) print("setting output directory to:", output_dir) folder_paths.set_output_directory(output_dir) - except: - pass - port = 8188 - try: - p_index = sys.argv.index('--port') - port = int(sys.argv[p_index + 1]) - except: - pass + port = args.port - if '--quick-test-for-ci' in sys.argv: + if args.quick_test_for_ci: exit(0) call_on_start = None - if "--windows-standalone-build" in sys.argv: + if args.windows_standalone_build: def startup_server(address, port): import webbrowser webbrowser.open("http://{}:{}".format(address, port)) From e5e587b1c0c5dc728d65b3e84592445cdb5e6e9b Mon Sep 17 00:00:00 2001 From: EllangoK Date: Wed, 5 Apr 2023 23:41:23 -0400 Subject: [PATCH 07/42] seperates out arg parser and imports args --- comfy/cli_args.py | 29 +++++++++ comfy/ldm/modules/attention.py | 5 +- comfy/model_management.py | 111 ++++++++++++++++----------------- main.py | 27 +------- 4 files changed, 88 insertions(+), 84 deletions(-) create mode 100644 comfy/cli_args.py diff --git a/comfy/cli_args.py b/comfy/cli_args.py new file mode 100644 index 000000000..6a56e315c --- /dev/null +++ b/comfy/cli_args.py @@ -0,0 +1,29 @@ +import argparse + +parser = argparse.ArgumentParser() + +parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 127.0.0.1 if none given so the UI can be accessed from other computers.") +parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") +parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") +parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") +parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") + +attn_group = parser.add_mutually_exclusive_group() +attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.") +attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") + +parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") +parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") + +vram_group = parser.add_mutually_exclusive_group() +vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.") +vram_group.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.") +vram_group.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.") +vram_group.add_argument("--novram", action="store_true", help="When lowvram isn't enough.") +vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).") + +parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") +parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") +parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.") + +args = parser.parse_args() diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 07553627c..92b3eca7c 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -21,6 +21,8 @@ if model_management.xformers_enabled(): import os _ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32") +from cli_args import args + def exists(val): return val is not None @@ -474,7 +476,6 @@ class CrossAttentionPytorch(nn.Module): return self.to_out(out) -import sys if model_management.xformers_enabled(): print("Using xformers cross attention") CrossAttention = MemoryEfficientCrossAttention @@ -482,7 +483,7 @@ elif model_management.pytorch_attention_enabled(): print("Using pytorch cross attention") CrossAttention = CrossAttentionPytorch else: - if "--use-split-cross-attention" in sys.argv: + if args.use_split_cross_attention: print("Using split optimization for cross attention") CrossAttention = CrossAttentionDoggettx else: diff --git a/comfy/model_management.py b/comfy/model_management.py index 052dfb775..7dda073dc 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -1,36 +1,35 @@ +import psutil +from enum import Enum +from cli_args import args -CPU = 0 -NO_VRAM = 1 -LOW_VRAM = 2 -NORMAL_VRAM = 3 -HIGH_VRAM = 4 -MPS = 5 +class VRAMState(Enum): + CPU = 0 + NO_VRAM = 1 + LOW_VRAM = 2 + NORMAL_VRAM = 3 + HIGH_VRAM = 4 + MPS = 5 -accelerate_enabled = False -vram_state = NORMAL_VRAM +# Determine VRAM State +vram_state = VRAMState.NORMAL_VRAM +set_vram_to = VRAMState.NORMAL_VRAM total_vram = 0 total_vram_available_mb = -1 -import sys -import psutil - -forced_cpu = "--cpu" in sys.argv - -set_vram_to = NORMAL_VRAM +accelerate_enabled = False try: import torch total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) - forced_normal_vram = "--normalvram" in sys.argv - if not forced_normal_vram and not forced_cpu: + if not args.normalvram and not args.cpu: if total_vram <= 4096: print("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram") - set_vram_to = LOW_VRAM + set_vram_to = VRAMState.LOW_VRAM elif total_vram > total_ram * 1.1 and total_vram > 14336: print("Enabling highvram mode because your GPU has more vram than your computer has ram. 
If you don't want this use: --normalvram") - vram_state = HIGH_VRAM + vram_state = VRAMState.HIGH_VRAM except: pass @@ -39,34 +38,32 @@ try: except: OOM_EXCEPTION = Exception -if "--disable-xformers" in sys.argv: - XFORMERS_IS_AVAILBLE = False +if args.disable_xformers: + XFORMERS_IS_AVAILABLE = False else: try: import xformers import xformers.ops - XFORMERS_IS_AVAILBLE = True + XFORMERS_IS_AVAILABLE = True except: - XFORMERS_IS_AVAILBLE = False + XFORMERS_IS_AVAILABLE = False -ENABLE_PYTORCH_ATTENTION = False -if "--use-pytorch-cross-attention" in sys.argv: +ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention +if ENABLE_PYTORCH_ATTENTION: torch.backends.cuda.enable_math_sdp(True) torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp(True) - ENABLE_PYTORCH_ATTENTION = True - XFORMERS_IS_AVAILBLE = False + XFORMERS_IS_AVAILABLE = False + +if args.lowvram: + set_vram_to = VRAMState.LOW_VRAM +elif args.novram: + set_vram_to = VRAMState.NO_VRAM +elif args.highvram: + vram_state = VRAMState.HIGH_VRAM -if "--lowvram" in sys.argv: - set_vram_to = LOW_VRAM -if "--novram" in sys.argv: - set_vram_to = NO_VRAM -if "--highvram" in sys.argv: - vram_state = HIGH_VRAM - - -if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM: +if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM): try: import accelerate accelerate_enabled = True @@ -81,14 +78,14 @@ if set_vram_to == LOW_VRAM or set_vram_to == NO_VRAM: try: if torch.backends.mps.is_available(): - vram_state = MPS + vram_state = VRAMState.MPS except: pass -if forced_cpu: - vram_state = CPU +if args.cpu: + vram_state = VRAMState.CPU -print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state]) +print(f"Set vram state to: {vram_state.name}") current_loaded_model = None @@ -109,12 +106,12 @@ def unload_model(): model_accelerated = False #never unload models from GPU on high vram - if vram_state != HIGH_VRAM: + if vram_state != VRAMState.HIGH_VRAM: current_loaded_model.model.cpu() current_loaded_model.unpatch_model() current_loaded_model = None - if vram_state != HIGH_VRAM: + if vram_state != VRAMState.HIGH_VRAM: if len(current_gpu_controlnets) > 0: for n in current_gpu_controlnets: n.cpu() @@ -135,19 +132,19 @@ def load_model_gpu(model): model.unpatch_model() raise e current_loaded_model = model - if vram_state == CPU: + if vram_state == VRAMState.CPU: pass - elif vram_state == MPS: + elif vram_state == VRAMState.MPS: mps_device = torch.device("mps") real_model.to(mps_device) pass - elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM: + elif vram_state == VRAMState.NORMAL_VRAM or vram_state == VRAMState.HIGH_VRAM: model_accelerated = False real_model.cuda() else: - if vram_state == NO_VRAM: + if vram_state == VRAMState.NO_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"}) - elif vram_state == LOW_VRAM: + elif vram_state == VRAMState.LOW_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"}) accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda") @@ -157,10 +154,10 @@ def load_model_gpu(model): def load_controlnet_gpu(models): global current_gpu_controlnets global vram_state - if vram_state == CPU: + if vram_state == VRAMState.CPU: return - if vram_state == LOW_VRAM or vram_state == NO_VRAM: + if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: #don't load controlnets like this if low 
vram because they will be loaded right before running and unloaded right after return @@ -176,20 +173,20 @@ def load_controlnet_gpu(models): def load_if_low_vram(model): global vram_state - if vram_state == LOW_VRAM or vram_state == NO_VRAM: + if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: return model.cuda() return model def unload_if_low_vram(model): global vram_state - if vram_state == LOW_VRAM or vram_state == NO_VRAM: + if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: return model.cpu() return model def get_torch_device(): - if vram_state == MPS: + if vram_state == VRAMState.MPS: return torch.device("mps") - if vram_state == CPU: + if vram_state == VRAMState.CPU: return torch.device("cpu") else: return torch.cuda.current_device() @@ -201,9 +198,9 @@ def get_autocast_device(dev): def xformers_enabled(): - if vram_state == CPU: + if vram_state == VRAMState.CPU: return False - return XFORMERS_IS_AVAILBLE + return XFORMERS_IS_AVAILABLE def xformers_enabled_vae(): @@ -243,7 +240,7 @@ def get_free_memory(dev=None, torch_free_too=False): def maximum_batch_area(): global vram_state - if vram_state == NO_VRAM: + if vram_state == VRAMState.NO_VRAM: return 0 memory_free = get_free_memory() / (1024 * 1024) @@ -252,11 +249,11 @@ def maximum_batch_area(): def cpu_mode(): global vram_state - return vram_state == CPU + return vram_state == VRAMState.CPU def mps_mode(): global vram_state - return vram_state == MPS + return vram_state == VRAMState.MPS def should_use_fp16(): if cpu_mode() or mps_mode(): diff --git a/main.py b/main.py index 20c8a49e8..51a48fc6d 100644 --- a/main.py +++ b/main.py @@ -1,37 +1,14 @@ -import argparse import asyncio import os import shutil -import sys import threading +from comfy.cli_args import args if os.name == "nt": import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Script Arguments") - - parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 0.0.0.0 if none given so the UI can be accessed from other computers.") - parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") - parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") - parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") - parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") - parser.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.") - parser.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") - parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") - parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") - parser.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. 
This option keeps them in GPU memory.") - parser.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.") - parser.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.") - parser.add_argument("--novram", action="store_true", help="When lowvram isn't enough.") - parser.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).") - parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") - parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") - parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.") - - args = parser.parse_args() - if args.dont_upcast_attention: print("disabling upcasting of attention") os.environ['ATTN_PRECISION'] = "fp16" @@ -121,7 +98,7 @@ if __name__ == "__main__": if args.output_directory: output_dir = os.path.abspath(args.output_directory) - print("setting output directory to:", output_dir) + print(f"Setting output directory to: {output_dir}") folder_paths.set_output_directory(output_dir) port = args.port From 84b9c0ac2ff49b5b18b8e7804f8fe42a379a0787 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Thu, 6 Apr 2023 12:27:22 +0800 Subject: [PATCH 08/42] Import intel_extension_for_pytorch as ipex --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index f0b8be55e..379cc18d7 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -87,7 +87,7 @@ except: pass try: - import intel_extension_for_pytorch + import intel_extension_for_pytorch as ipex if torch.xpu.is_available(): vram_state = XPU except: From 7cb924f68469cd2481b2313f8e5fc02587279bf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Thu, 6 Apr 2023 14:24:47 +0800 Subject: [PATCH 09/42] Use separate variables instead of `vram_state` --- comfy/model_management.py | 70 +++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 379cc18d7..a84167746 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -5,9 +5,9 @@ LOW_VRAM = 2 NORMAL_VRAM = 3 HIGH_VRAM = 4 MPS = 5 -XPU = 6 accelerate_enabled = False +xpu_available = False vram_state = NORMAL_VRAM total_vram = 0 @@ -22,7 +22,12 @@ set_vram_to = NORMAL_VRAM try: import torch - total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) + import intel_extension_for_pytorch as ipex + if torch.xpu.is_available(): + xpu_available = True + total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) + else: + total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) forced_normal_vram = "--normalvram" in sys.argv if not forced_normal_vram and not forced_cpu: @@ -86,17 +91,10 @@ try: except: pass -try: - import intel_extension_for_pytorch as ipex - if torch.xpu.is_available(): - vram_state = XPU -except: - pass - if forced_cpu: vram_state = CPU -print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", "NORMAL VRAM", "HIGH VRAM", "MPS", "XPU"][vram_state]) +print("Set vram state to:", ["CPU", "NO VRAM", "LOW VRAM", 
"NORMAL VRAM", "HIGH VRAM", "MPS"][vram_state]) current_loaded_model = None @@ -133,6 +131,7 @@ def load_model_gpu(model): global current_loaded_model global vram_state global model_accelerated + global xpu_available if model is current_loaded_model: return @@ -149,19 +148,19 @@ def load_model_gpu(model): mps_device = torch.device("mps") real_model.to(mps_device) pass - elif vram_state == XPU: - real_model.to("xpu") - pass elif vram_state == NORMAL_VRAM or vram_state == HIGH_VRAM: model_accelerated = False - real_model.cuda() + if xpu_available: + real_model.to("xpu") + else: + real_model.cuda() else: if vram_state == NO_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"}) elif vram_state == LOW_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"}) - accelerate.dispatch_model(real_model, device_map=device_map, main_device="cuda") + accelerate.dispatch_model(real_model, device_map=device_map, main_device="xpu" if xpu_available else "cuda") model_accelerated = True return current_loaded_model @@ -187,8 +186,12 @@ def load_controlnet_gpu(models): def load_if_low_vram(model): global vram_state + global xpu_available if vram_state == LOW_VRAM or vram_state == NO_VRAM: - return model.cuda() + if xpu_available: + return model.to("xpu") + else: + return model.cuda() return model def unload_if_low_vram(model): @@ -198,14 +201,16 @@ def unload_if_low_vram(model): return model def get_torch_device(): + global xpu_available if vram_state == MPS: return torch.device("mps") - if vram_state == XPU: - return torch.device("xpu") if vram_state == CPU: return torch.device("cpu") else: - return torch.cuda.current_device() + if xpu_available: + return torch.device("xpu") + else: + return torch.cuda.current_device() def get_autocast_device(dev): if hasattr(dev, 'type'): @@ -235,22 +240,24 @@ def pytorch_attention_enabled(): return ENABLE_PYTORCH_ATTENTION def get_free_memory(dev=None, torch_free_too=False): + global xpu_available if dev is None: dev = get_torch_device() if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'): mem_free_total = psutil.virtual_memory().available mem_free_torch = mem_free_total - elif hasattr(dev, 'type') and (dev.type == 'xpu'): - mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) - mem_free_torch = mem_free_total else: - stats = torch.cuda.memory_stats(dev) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(dev) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch + if xpu_available: + mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) + mem_free_torch = mem_free_total + else: + stats = torch.cuda.memory_stats(dev) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(dev) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch if torch_free_too: return (mem_free_total, mem_free_torch) @@ -274,12 +281,9 @@ def mps_mode(): global vram_state return vram_state == MPS -def xpu_mode(): - global vram_state - return vram_state == XPU - def should_use_fp16(): - if cpu_mode() or mps_mode() or xpu_mode(): + global xpu_available + if cpu_mode() or mps_mode() or xpu_available: 
return False #TODO ? if torch.cuda.is_bf16_supported(): From 60127a83040b3b243457980d04f3bb25c4491978 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Wed, 5 Apr 2023 23:57:31 -0700 Subject: [PATCH 10/42] diffusers loader --- comfy/diffusers_convert.py | 364 +++++++++++++++++++++ models/diffusers/put_diffusers_models_here | 0 nodes.py | 19 +- 3 files changed, 382 insertions(+), 1 deletion(-) create mode 100644 comfy/diffusers_convert.py create mode 100644 models/diffusers/put_diffusers_models_here diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py new file mode 100644 index 000000000..a31c1c11b --- /dev/null +++ b/comfy/diffusers_convert.py @@ -0,0 +1,364 @@ +import json +import os +import yaml + +# because of local import nonsense +import sys +sys.path.append(os.path.dirname(os.path.realpath(__file__))) + +import folder_paths +from comfy.ldm.util import instantiate_from_config +from comfy.sd import ModelPatcher, load_model_weights, CLIP, VAE +import os.path as osp +import re +import torch +from safetensors.torch import load_file, save_file + +# conversion code from https://github.com/huggingface/diffusers/blob/main/scripts/convert_diffusers_to_original_stable_diffusion.py + +# =================# +# UNet Conversion # +# =================# + +unet_conversion_map = [ + # (stable-diffusion, HF Diffusers) + ("time_embed.0.weight", "time_embedding.linear_1.weight"), + ("time_embed.0.bias", "time_embedding.linear_1.bias"), + ("time_embed.2.weight", "time_embedding.linear_2.weight"), + ("time_embed.2.bias", "time_embedding.linear_2.bias"), + ("input_blocks.0.0.weight", "conv_in.weight"), + ("input_blocks.0.0.bias", "conv_in.bias"), + ("out.0.weight", "conv_norm_out.weight"), + ("out.0.bias", "conv_norm_out.bias"), + ("out.2.weight", "conv_out.weight"), + ("out.2.bias", "conv_out.bias"), +] + +unet_conversion_map_resnet = [ + # (stable-diffusion, HF Diffusers) + ("in_layers.0", "norm1"), + ("in_layers.2", "conv1"), + ("out_layers.0", "norm2"), + ("out_layers.3", "conv2"), + ("emb_layers.1", "time_emb_proj"), + ("skip_connection", "conv_shortcut"), +] + +unet_conversion_map_layer = [] +# hardcoded number of downblocks and resnets/attentions... +# would need smarter logic for other networks. +for i in range(4): + # loop over downblocks/upblocks + + for j in range(2): + # loop over resnets/attentions for downblocks + hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}." + sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0." + unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) + + if i < 3: + # no attention layers in down_blocks.3 + hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}." + sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1." + unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) + + for j in range(3): + # loop over resnets/attentions for upblocks + hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}." + sd_up_res_prefix = f"output_blocks.{3 * i + j}.0." + unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) + + if i > 0: + # no attention layers in up_blocks.0 + hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}." + sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1." + unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) + + if i < 3: + # no downsample in down_blocks.3 + hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv." + sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op." 
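        # SD's flat input_blocks list holds three entries per resolution level
        # (two resnet/attention blocks plus one downsample op), hence the
        # 3 * i strides used throughout this loop.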
+ unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) + + # no upsample in up_blocks.3 + hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." + sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{1 if i == 0 else 2}." + unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) + +hf_mid_atn_prefix = "mid_block.attentions.0." +sd_mid_atn_prefix = "middle_block.1." +unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) + +for j in range(2): + hf_mid_res_prefix = f"mid_block.resnets.{j}." + sd_mid_res_prefix = f"middle_block.{2 * j}." + unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) + + +def convert_unet_state_dict(unet_state_dict): + # buyer beware: this is a *brittle* function, + # and correct output requires that all of these pieces interact in + # the exact order in which I have arranged them. + mapping = {k: k for k in unet_state_dict.keys()} + for sd_name, hf_name in unet_conversion_map: + mapping[hf_name] = sd_name + for k, v in mapping.items(): + if "resnets" in k: + for sd_part, hf_part in unet_conversion_map_resnet: + v = v.replace(hf_part, sd_part) + mapping[k] = v + for k, v in mapping.items(): + for sd_part, hf_part in unet_conversion_map_layer: + v = v.replace(hf_part, sd_part) + mapping[k] = v + new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()} + return new_state_dict + + +# ================# +# VAE Conversion # +# ================# + +vae_conversion_map = [ + # (stable-diffusion, HF Diffusers) + ("nin_shortcut", "conv_shortcut"), + ("norm_out", "conv_norm_out"), + ("mid.attn_1.", "mid_block.attentions.0."), +] + +for i in range(4): + # down_blocks have two resnets + for j in range(2): + hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}." + sd_down_prefix = f"encoder.down.{i}.block.{j}." + vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) + + if i < 3: + hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0." + sd_downsample_prefix = f"down.{i}.downsample." + vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) + + hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0." + sd_upsample_prefix = f"up.{3 - i}.upsample." + vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) + + # up_blocks have three resnets + # also, up blocks in hf are numbered in reverse from sd + for j in range(3): + hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}." + sd_up_prefix = f"decoder.up.{3 - i}.block.{j}." + vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) + +# this part accounts for mid blocks in both the encoder and the decoder +for i in range(2): + hf_mid_res_prefix = f"mid_block.resnets.{i}." + sd_mid_res_prefix = f"mid.block_{i + 1}." 
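    # SD names the VAE mid-block resnets 1-based (mid.block_1, mid.block_2)
    # while diffusers numbers them 0-based, hence the i + 1 above.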
+ vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) + +vae_conversion_map_attn = [ + # (stable-diffusion, HF Diffusers) + ("norm.", "group_norm."), + ("q.", "query."), + ("k.", "key."), + ("v.", "value."), + ("proj_out.", "proj_attn."), +] + + +def reshape_weight_for_sd(w): + # convert HF linear weights to SD conv2d weights + return w.reshape(*w.shape, 1, 1) + + +def convert_vae_state_dict(vae_state_dict): + mapping = {k: k for k in vae_state_dict.keys()} + for k, v in mapping.items(): + for sd_part, hf_part in vae_conversion_map: + v = v.replace(hf_part, sd_part) + mapping[k] = v + for k, v in mapping.items(): + if "attentions" in k: + for sd_part, hf_part in vae_conversion_map_attn: + v = v.replace(hf_part, sd_part) + mapping[k] = v + new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()} + weights_to_convert = ["q", "k", "v", "proj_out"] + for k, v in new_state_dict.items(): + for weight_name in weights_to_convert: + if f"mid.attn_1.{weight_name}.weight" in k: + print(f"Reshaping {k} for SD format") + new_state_dict[k] = reshape_weight_for_sd(v) + return new_state_dict + + +# =========================# +# Text Encoder Conversion # +# =========================# + + +textenc_conversion_lst = [ + # (stable-diffusion, HF Diffusers) + ("resblocks.", "text_model.encoder.layers."), + ("ln_1", "layer_norm1"), + ("ln_2", "layer_norm2"), + (".c_fc.", ".fc1."), + (".c_proj.", ".fc2."), + (".attn", ".self_attn"), + ("ln_final.", "transformer.text_model.final_layer_norm."), + ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), + ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), +] +protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} +textenc_pattern = re.compile("|".join(protected.keys())) + +# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp +code2idx = {"q": 0, "k": 1, "v": 2} + + +def convert_text_enc_state_dict_v20(text_enc_dict): + new_state_dict = {} + capture_qkv_weight = {} + capture_qkv_bias = {} + for k, v in text_enc_dict.items(): + if ( + k.endswith(".self_attn.q_proj.weight") + or k.endswith(".self_attn.k_proj.weight") + or k.endswith(".self_attn.v_proj.weight") + ): + k_pre = k[: -len(".q_proj.weight")] + k_code = k[-len("q_proj.weight")] + if k_pre not in capture_qkv_weight: + capture_qkv_weight[k_pre] = [None, None, None] + capture_qkv_weight[k_pre][code2idx[k_code]] = v + continue + + if ( + k.endswith(".self_attn.q_proj.bias") + or k.endswith(".self_attn.k_proj.bias") + or k.endswith(".self_attn.v_proj.bias") + ): + k_pre = k[: -len(".q_proj.bias")] + k_code = k[-len("q_proj.bias")] + if k_pre not in capture_qkv_bias: + capture_qkv_bias[k_pre] = [None, None, None] + capture_qkv_bias[k_pre][code2idx[k_code]] = v + continue + + relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k) + new_state_dict[relabelled_key] = v + + for k_pre, tensors in capture_qkv_weight.items(): + if None in tensors: + raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") + relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre) + new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors) + + for k_pre, tensors in capture_qkv_bias.items(): + if None in tensors: + raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing") + relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], 
k_pre) + new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors) + + return new_state_dict + + +def convert_text_enc_state_dict(text_enc_dict): + return text_enc_dict + + +def load_diffusers(model_path, fp16=True, output_vae=True, output_clip=True, embedding_directory=None): + diffusers_unet_conf = json.load(open(osp.join(model_path, "unet/config.json"))) + diffusers_scheduler_conf = json.load(open(osp.join(model_path, "scheduler/scheduler_config.json"))) + + # magic + v2 = diffusers_unet_conf["sample_size"] == 96 + v_pred = diffusers_scheduler_conf['prediction_type'] == 'v_prediction' + + if v2: + if v_pred: + config_path = folder_paths.get_full_path("configs", 'v2-inference-v.yaml') + else: + config_path = folder_paths.get_full_path("configs", 'v2-inference.yaml') + else: + config_path = folder_paths.get_full_path("configs", 'v1-inference.yaml') + + with open(config_path, 'r') as stream: + config = yaml.safe_load(stream) + + model_config_params = config['model']['params'] + clip_config = model_config_params['cond_stage_config'] + scale_factor = model_config_params['scale_factor'] + vae_config = model_config_params['first_stage_config'] + vae_config['scale_factor'] = scale_factor + + unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors") + vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors") + text_enc_path = osp.join(model_path, "text_encoder", "model.safetensors") + + # Load models from safetensors if it exists, if it doesn't pytorch + if osp.exists(unet_path): + unet_state_dict = load_file(unet_path, device="cpu") + else: + unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin") + unet_state_dict = torch.load(unet_path, map_location="cpu") + + if osp.exists(vae_path): + vae_state_dict = load_file(vae_path, device="cpu") + else: + vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin") + vae_state_dict = torch.load(vae_path, map_location="cpu") + + if osp.exists(text_enc_path): + text_enc_dict = load_file(text_enc_path, device="cpu") + else: + text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin") + text_enc_dict = torch.load(text_enc_path, map_location="cpu") + + # Convert the UNet model + unet_state_dict = convert_unet_state_dict(unet_state_dict) + unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} + + # Convert the VAE model + vae_state_dict = convert_vae_state_dict(vae_state_dict) + vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} + + # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper + is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict + + if is_v20_model: + # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm + text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()} + text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict) + text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} + else: + text_enc_dict = convert_text_enc_state_dict(text_enc_dict) + text_enc_dict = {"cond_stage_model.transformer." 
+ k: v for k, v in text_enc_dict.items()} + + # Put together new checkpoint + sd = {**unet_state_dict, **vae_state_dict, **text_enc_dict} + + clip = None + vae = None + + class WeightsLoader(torch.nn.Module): + pass + + w = WeightsLoader() + load_state_dict_to = [] + if output_vae: + vae = VAE(scale_factor=scale_factor, config=vae_config) + w.first_stage_model = vae.first_stage_model + load_state_dict_to = [w] + + if output_clip: + clip = CLIP(config=clip_config, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model + load_state_dict_to = [w] + + model = instantiate_from_config(config["model"]) + model = load_model_weights(model, sd, verbose=False, load_state_dict_to=load_state_dict_to) + + if fp16: + model = model.half() + + return ModelPatcher(model), clip, vae diff --git a/models/diffusers/put_diffusers_models_here b/models/diffusers/put_diffusers_models_here new file mode 100644 index 000000000..e69de29bb diff --git a/nodes.py b/nodes.py index 187d54a11..776bc3819 100644 --- a/nodes.py +++ b/nodes.py @@ -4,13 +4,14 @@ import os import sys import json import hashlib -import copy import traceback from PIL import Image from PIL.PngImagePlugin import PngInfo import numpy as np +from comfy.diffusers_convert import load_diffusers + sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) @@ -219,6 +220,21 @@ class CheckpointLoaderSimple: out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) return out +class DiffusersLoader: + @classmethod + def INPUT_TYPES(cls): + return {"required": {"model_path": (os.listdir(os.path.join(folder_paths.models_dir, 'diffusers'), ),), + }} + RETURN_TYPES = ("MODEL", "CLIP", "VAE") + FUNCTION = "load_checkpoint" + + CATEGORY = "loaders" + + def load_checkpoint(self, model_path, output_vae=True, output_clip=True): + model_path = os.path.join(folder_paths.models_dir, 'diffusers', model_path) + return load_diffusers(model_path, fp16=True, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + + class unCLIPCheckpointLoader: @classmethod def INPUT_TYPES(s): @@ -1076,6 +1092,7 @@ NODE_CLASS_MAPPINGS = { "TomePatchModel": TomePatchModel, "unCLIPCheckpointLoader": unCLIPCheckpointLoader, "CheckpointLoader": CheckpointLoader, + "DiffusersLoader": DiffusersLoader, } def load_custom_node(module_path): From c418d988ba59b3114770a0fa111d301f04880fca Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Wed, 5 Apr 2023 23:59:03 -0700 Subject: [PATCH 11/42] update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0f7d24c45..90931141d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin - Many optimizations: Only re-executes the parts of the workflow that changes between executions. - Command line option: ```--lowvram``` to make it work on GPUs with less than 3GB vram (enabled automatically on GPUs with low vram) - Works even if you don't have a GPU with: ```--cpu``` (slow) -- Can load both ckpt and safetensors models/checkpoints. Standalone VAEs and CLIP models. +- Can load ckpt, safetensors and diffusers models/checkpoints. Standalone VAEs and CLIP models. 
- Embeddings/Textual inversion - [Loras (regular, locon and loha)](https://comfyanonymous.github.io/ComfyUI_examples/lora/) - Loading full workflows (with seeds) from generated PNG files. From 3d16077e3806b0817b1d43dc14f61e5dee5495c8 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 00:24:52 -0700 Subject: [PATCH 12/42] empty list if diffusers directory doesn't exist --- nodes.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index 776bc3819..1af62887d 100644 --- a/nodes.py +++ b/nodes.py @@ -223,8 +223,11 @@ class CheckpointLoaderSimple: class DiffusersLoader: @classmethod def INPUT_TYPES(cls): - return {"required": {"model_path": (os.listdir(os.path.join(folder_paths.models_dir, 'diffusers'), ),), - }} + paths = [] + search_path = os.path.join(folder_paths.models_dir, 'diffusers') + if os.path.exists(search_path): + paths = next(os.walk(search_path))[1] + return {"required": {"model_path": (paths,), }} RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" From 42fd67b5cb0de9bd1228af7a93dec08b2f1486c3 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 00:28:06 -0700 Subject: [PATCH 13/42] use precision determined by model management --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 1af62887d..8271da04c 100644 --- a/nodes.py +++ b/nodes.py @@ -235,7 +235,7 @@ class DiffusersLoader: def load_checkpoint(self, model_path, output_vae=True, output_clip=True): model_path = os.path.join(folder_paths.models_dir, 'diffusers', model_path) - return load_diffusers(model_path, fp16=True, output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) class unCLIPCheckpointLoader: From 3e2608e12b312fd5d2396d4146d992cd4f8b9ab4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=97=8D+85CD?= <50108258+kwaa@users.noreply.github.com> Date: Thu, 6 Apr 2023 15:44:05 +0800 Subject: [PATCH 14/42] Fix auto lowvram detection on CUDA --- comfy/model_management.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index a84167746..b0123b5fc 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -22,11 +22,12 @@ set_vram_to = NORMAL_VRAM try: import torch - import intel_extension_for_pytorch as ipex - if torch.xpu.is_available(): - xpu_available = True - total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) - else: + try: + import intel_extension_for_pytorch as ipex + if torch.xpu.is_available(): + xpu_available = True + total_vram = torch.xpu.get_device_properties(torch.xpu.current_device()).total_memory / (1024 * 1024) + except: total_vram = torch.cuda.mem_get_info(torch.cuda.current_device())[1] / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) forced_normal_vram = "--normalvram" in sys.argv From 01c1fc669fb8cd41f627dad871257acbaaf24b47 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Thu, 6 Apr 2023 13:19:00 -0400 Subject: [PATCH 15/42] set listen flag to listen on all if specifed --- comfy/cli_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 6a56e315c..a27dc7a7f 100644 --- a/comfy/cli_args.py +++ 
b/comfy/cli_args.py @@ -2,7 +2,7 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 127.0.0.1 if none given so the UI can be accessed from other computers.") +parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") From 7d62d89f9325348179fc9b0db146ff50fa7c808c Mon Sep 17 00:00:00 2001 From: EllangoK Date: Wed, 5 Apr 2023 13:08:08 -0400 Subject: [PATCH 16/42] add cors middleware --- server.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/server.py b/server.py index 840d9a4e7..005bf9b2c 100644 --- a/server.py +++ b/server.py @@ -27,6 +27,19 @@ async def cache_control(request: web.Request, handler): response.headers.setdefault('Cache-Control', 'no-cache') return response +@web.middleware +async def cors_middleware(request: web.Request, handler): + if request.method == "OPTIONS": + # Pre-flight request. Reply successfully: + response = web.Response() + else: + response = await handler(request) + response.headers['Access-Control-Allow-Origin'] = '*' + response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS' + response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization' + response.headers['Access-Control-Allow-Credentials'] = 'true' + return response + class PromptServer(): def __init__(self, loop): PromptServer.instance = self @@ -37,7 +50,7 @@ class PromptServer(): self.loop = loop self.messages = asyncio.Queue() self.number = 0 - self.app = web.Application(client_max_size=20971520, middlewares=[cache_control]) + self.app = web.Application(client_max_size=20971520, middlewares=[cache_control, cors_middleware]) self.sockets = dict() self.web_root = os.path.join(os.path.dirname( os.path.realpath(__file__)), "web") From 48efae16084b423166f9a1930b989489169d22cf Mon Sep 17 00:00:00 2001 From: EllangoK Date: Thu, 6 Apr 2023 15:06:22 -0400 Subject: [PATCH 17/42] makes cors a cli parameter --- comfy/cli_args.py | 3 ++- server.py | 36 +++++++++++++++++++++++------------- 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index a27dc7a7f..5133e0ae5 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -4,8 +4,10 @@ parser = argparse.ArgumentParser() parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. 
(listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") +parser.add_argument("--cors", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") +parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") attn_group = parser.add_mutually_exclusive_group() @@ -13,7 +15,6 @@ attn_group.add_argument("--use-split-cross-attention", action="store_true", help attn_group.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.") parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.") -parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") vram_group = parser.add_mutually_exclusive_group() vram_group.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.") diff --git a/server.py b/server.py index 005bf9b2c..a9c0b4599 100644 --- a/server.py +++ b/server.py @@ -18,6 +18,7 @@ except ImportError: sys.exit() import mimetypes +from comfy.cli_args import args @web.middleware @@ -27,18 +28,22 @@ async def cache_control(request: web.Request, handler): response.headers.setdefault('Cache-Control', 'no-cache') return response -@web.middleware -async def cors_middleware(request: web.Request, handler): - if request.method == "OPTIONS": - # Pre-flight request. Reply successfully: - response = web.Response() - else: - response = await handler(request) - response.headers['Access-Control-Allow-Origin'] = '*' - response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS' - response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization' - response.headers['Access-Control-Allow-Credentials'] = 'true' - return response +def create_cors_middleware(allowed_origin: str): + @web.middleware + async def cors_middleware(request: web.Request, handler): + if request.method == "OPTIONS": + # Pre-flight request. 
Reply successfully: + response = web.Response() + else: + response = await handler(request) + + response.headers['Access-Control-Allow-Origin'] = allowed_origin + response.headers['Access-Control-Allow-Methods'] = 'POST, GET, DELETE, PUT, OPTIONS' + response.headers['Access-Control-Allow-Headers'] = 'Content-Type, Authorization' + response.headers['Access-Control-Allow-Credentials'] = 'true' + return response + + return cors_middleware class PromptServer(): def __init__(self, loop): @@ -50,7 +55,12 @@ class PromptServer(): self.loop = loop self.messages = asyncio.Queue() self.number = 0 - self.app = web.Application(client_max_size=20971520, middlewares=[cache_control, cors_middleware]) + + middlewares = [cache_control] + if args.cors: + middlewares.append(create_cors_middleware(args.cors)) + + self.app = web.Application(client_max_size=20971520, middlewares=middlewares) self.sockets = dict() self.web_root = os.path.join(os.path.dirname( os.path.realpath(__file__)), "web") From f84f2508cc45a014cc27e023e9623db0450d237e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 6 Apr 2023 15:24:55 -0400 Subject: [PATCH 18/42] Rename the cors parameter to something more verbose. --- comfy/cli_args.py | 2 +- server.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 5133e0ae5..f2960ae31 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -4,7 +4,7 @@ parser = argparse.ArgumentParser() parser.add_argument("--listen", nargs="?", const="0.0.0.0", default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") -parser.add_argument("--cors", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") +parser.add_argument("--enable-cors-header", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") diff --git a/server.py b/server.py index a9c0b4599..95cdeb051 100644 --- a/server.py +++ b/server.py @@ -57,8 +57,8 @@ class PromptServer(): self.number = 0 middlewares = [cache_control] - if args.cors: - middlewares.append(create_cors_middleware(args.cors)) + if args.enable_cors_header: + middlewares.append(create_cors_middleware(args.enable_cors_header)) self.app = web.Application(client_max_size=20971520, middlewares=middlewares) self.sockets = dict() From 28fff5d1dbba8b4a546e31c69240133f35b2235f Mon Sep 17 00:00:00 2001 From: EllangoK Date: Thu, 6 Apr 2023 19:06:39 -0400 Subject: [PATCH 19/42] fixes lack of support for multi configs; also adds some metavars to argparse --- comfy/cli_args.py | 8 ++++---- main.py | 5 ++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index f2960ae31..b6898cea9 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -2,12 +2,12 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument("--listen", nargs="?", const="0.0.0.0", 
default="127.0.0.1", type=str, help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") +parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. (listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") -parser.add_argument("--enable-cors-header", default=None, nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") -parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.") +parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") +parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") -parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.") +parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") attn_group = parser.add_mutually_exclusive_group() diff --git a/main.py b/main.py index 51a48fc6d..9c0a3d8a1 100644 --- a/main.py +++ b/main.py @@ -1,7 +1,9 @@ import asyncio +import itertools import os import shutil import threading + from comfy.cli_args import args if os.name == "nt": @@ -94,7 +96,8 @@ if __name__ == "__main__": load_extra_path_config(extra_model_paths_config_path) if args.extra_model_paths_config: - load_extra_path_config(args.extra_model_paths_config) + for config_path in itertools.chain(*args.extra_model_paths_config): + load_extra_path_config(config_path) if args.output_directory: output_dir = os.path.abspath(args.output_directory) From 60b4c31ab3c2ec16575c26d9d08ecabc8643b381 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 6 Apr 2023 22:22:59 -0400 Subject: [PATCH 20/42] Add webp images to upload accept list. --- web/scripts/widgets.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 5f5043cd0..d1a9c6c6e 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -306,7 +306,7 @@ export const ComfyWidgets = { const fileInput = document.createElement("input"); Object.assign(fileInput, { type: "file", - accept: "image/jpeg,image/png", + accept: "image/jpeg,image/png,image/webp", style: "display: none", onchange: async () => { if (fileInput.files.length) { From bceccca0e59862c3410b5d99b47fe1e01ba914af Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 6 Apr 2023 23:52:34 -0400 Subject: [PATCH 21/42] Small refactor. 
--- comfy/model_management.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 92c59efe7..504da2190 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -129,7 +129,6 @@ def load_model_gpu(model): global current_loaded_model global vram_state global model_accelerated - global xpu_available if model is current_loaded_model: return @@ -148,17 +147,14 @@ def load_model_gpu(model): pass elif vram_state == VRAMState.NORMAL_VRAM or vram_state == VRAMState.HIGH_VRAM: model_accelerated = False - if xpu_available: - real_model.to("xpu") - else: - real_model.cuda() + real_model.to(get_torch_device()) else: if vram_state == VRAMState.NO_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "256MiB", "cpu": "16GiB"}) elif vram_state == VRAMState.LOW_VRAM: device_map = accelerate.infer_auto_device_map(real_model, max_memory={0: "{}MiB".format(total_vram_available_mb), "cpu": "16GiB"}) - accelerate.dispatch_model(real_model, device_map=device_map, main_device="xpu" if xpu_available else "cuda") + accelerate.dispatch_model(real_model, device_map=device_map, main_device=get_torch_device()) model_accelerated = True return current_loaded_model @@ -184,12 +180,8 @@ def load_controlnet_gpu(models): def load_if_low_vram(model): global vram_state - global xpu_available if vram_state == VRAMState.LOW_VRAM or vram_state == VRAMState.NO_VRAM: - if xpu_available: - return model.to("xpu") - else: - return model.cuda() + return model.to(get_torch_device()) return model def unload_if_low_vram(model): From 64557d67810c81f72bd6a7544bd8930488868319 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 00:27:54 -0400 Subject: [PATCH 22/42] Add a --force-fp32 argument to force fp32 for debugging. --- comfy/cli_args.py | 1 + comfy/model_management.py | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index b6898cea9..739891f71 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -9,6 +9,7 @@ parser.add_argument("--extra-model-paths-config", type=str, default=None, metava parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.") +parser.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).") attn_group = parser.add_mutually_exclusive_group() attn_group.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. 
Ignored when xformers is used.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 504da2190..2407140fd 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -69,6 +69,11 @@ elif args.novram: elif args.highvram: vram_state = VRAMState.HIGH_VRAM +FORCE_FP32 = False +if args.force_fp32: + print("Forcing FP32, if this improves things please report it.") + FORCE_FP32 = True + if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM): try: @@ -273,6 +278,9 @@ def mps_mode(): def should_use_fp16(): global xpu_available + if FORCE_FP32: + return False + if cpu_mode() or mps_mode() or xpu_available: return False #TODO ? From 72a8973bd56b7cc179eb603ccd61385fdca5766d Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 21:45:08 -0700 Subject: [PATCH 23/42] allow configurable path for diffusers models --- folder_paths.py | 1 + nodes.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/folder_paths.py b/folder_paths.py index f13e4895f..ab3359347 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -23,6 +23,7 @@ folder_names_and_paths["clip"] = ([os.path.join(models_dir, "clip")], supported_ folder_names_and_paths["clip_vision"] = ([os.path.join(models_dir, "clip_vision")], supported_pt_extensions) folder_names_and_paths["style_models"] = ([os.path.join(models_dir, "style_models")], supported_pt_extensions) folder_names_and_paths["embeddings"] = ([os.path.join(models_dir, "embeddings")], supported_pt_extensions) +folder_names_and_paths["diffusers"] = ([os.path.join(models_dir, "diffusers")], ["folder"]) folder_names_and_paths["controlnet"] = ([os.path.join(models_dir, "controlnet"), os.path.join(models_dir, "t2i_adapter")], supported_pt_extensions) folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions) diff --git a/nodes.py b/nodes.py index 8271da04c..934b458f2 100644 --- a/nodes.py +++ b/nodes.py @@ -224,7 +224,7 @@ class DiffusersLoader: @classmethod def INPUT_TYPES(cls): paths = [] - search_path = os.path.join(folder_paths.models_dir, 'diffusers') + search_path = folder_paths.get_folder_paths("diffusers")[0] if os.path.exists(search_path): paths = next(os.walk(search_path))[1] return {"required": {"model_path": (paths,), }} From f51b7a92c72b5fe7a12d642a545e59f1f6150fb4 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 21:48:58 -0700 Subject: [PATCH 24/42] search all diffusers paths (oops) --- nodes.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nodes.py b/nodes.py index 934b458f2..a4366f834 100644 --- a/nodes.py +++ b/nodes.py @@ -224,9 +224,10 @@ class DiffusersLoader: @classmethod def INPUT_TYPES(cls): paths = [] - search_path = folder_paths.get_folder_paths("diffusers")[0] - if os.path.exists(search_path): - paths = next(os.walk(search_path))[1] + search_paths = folder_paths.get_folder_paths("diffusers") + for search_path in search_paths: + if os.path.exists(search_path): + paths = next(os.walk(search_path))[1] return {"required": {"model_path": (paths,), }} RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" From 7734d65f22a8f30f73cb72e81586b2d015229060 Mon Sep 17 00:00:00 2001 From: sALTaccount Date: Thu, 6 Apr 2023 22:02:26 -0700 Subject: [PATCH 25/42] fix loading alt folders --- nodes.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/nodes.py b/nodes.py index a4366f834..274ae2f1f 100644 --- a/nodes.py +++ b/nodes.py @@ -224,10 +224,9 @@ class DiffusersLoader: 
@classmethod def INPUT_TYPES(cls): paths = [] - search_paths = folder_paths.get_folder_paths("diffusers") - for search_path in search_paths: + for search_path in folder_paths.get_folder_paths("diffusers"): if os.path.exists(search_path): - paths = next(os.walk(search_path))[1] + paths += next(os.walk(search_path))[1] return {"required": {"model_path": (paths,), }} RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" @@ -235,7 +234,13 @@ class DiffusersLoader: CATEGORY = "loaders" def load_checkpoint(self, model_path, output_vae=True, output_clip=True): - model_path = os.path.join(folder_paths.models_dir, 'diffusers', model_path) + for search_path in folder_paths.get_folder_paths("diffusers"): + if os.path.exists(search_path): + paths = next(os.walk(search_path))[1] + if model_path in paths: + model_path = os.path.join(search_path, model_path) + break + search_paths = folder_paths.get_folder_paths("diffusers") return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) From 58ed0f2da438aaf253f9880578d694ad917819f8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 01:28:15 -0400 Subject: [PATCH 26/42] Fix loading SD1.5 diffusers checkpoint. --- comfy/diffusers_convert.py | 4 +++- nodes.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py index a31c1c11b..950137f2c 100644 --- a/comfy/diffusers_convert.py +++ b/comfy/diffusers_convert.py @@ -272,7 +272,8 @@ def load_diffusers(model_path, fp16=True, output_vae=True, output_clip=True, emb # magic v2 = diffusers_unet_conf["sample_size"] == 96 - v_pred = diffusers_scheduler_conf['prediction_type'] == 'v_prediction' + if 'prediction_type' in diffusers_scheduler_conf: + v_pred = diffusers_scheduler_conf['prediction_type'] == 'v_prediction' if v2: if v_pred: @@ -290,6 +291,7 @@ def load_diffusers(model_path, fp16=True, output_vae=True, output_clip=True, emb scale_factor = model_config_params['scale_factor'] vae_config = model_config_params['first_stage_config'] vae_config['scale_factor'] = scale_factor + model_config_params["unet_config"]["params"]["use_fp16"] = fp16 unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.safetensors") vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.safetensors") diff --git a/nodes.py b/nodes.py index 274ae2f1f..025e4fcb4 100644 --- a/nodes.py +++ b/nodes.py @@ -231,7 +231,7 @@ class DiffusersLoader: RETURN_TYPES = ("MODEL", "CLIP", "VAE") FUNCTION = "load_checkpoint" - CATEGORY = "loaders" + CATEGORY = "advanced/loaders" def load_checkpoint(self, model_path, output_vae=True, output_clip=True): for search_path in folder_paths.get_folder_paths("diffusers"): @@ -240,7 +240,7 @@ class DiffusersLoader: if model_path in paths: model_path = os.path.join(search_path, model_path) break - search_paths = folder_paths.get_folder_paths("diffusers") + return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) From 44fea050649347ca4b4e7317a83d11c3b4b87f87 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 02:29:56 -0400 Subject: [PATCH 27/42] Cleanup. 
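
The sys.path.append() shim removed below existed only so comfy/diffusers_convert.py could resolve sibling modules as top-level imports; once callers import it through the package namespace, the shim is dead weight. A hedged sketch of the resulting call pattern (the model path is a placeholder, and unpacking into (model, clip, vae) is an assumption based on the node's RETURN_TYPES of ("MODEL", "CLIP", "VAE")):

import comfy.diffusers_convert
import comfy.model_management as model_management
import folder_paths

# Mirrors DiffusersLoader.load_checkpoint after model_path resolution:
# precision is decided centrally, embeddings come from the configured search paths.
model, clip, vae = comfy.diffusers_convert.load_diffusers(
    "models/diffusers/example-model",  # placeholder path
    fp16=model_management.should_use_fp16(),
    output_vae=True,
    output_clip=True,
    embedding_directory=folder_paths.get_folder_paths("embeddings"),
)
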
--- comfy/diffusers_convert.py | 4 ---- nodes.py | 4 ++-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py index 950137f2c..ceca80305 100644 --- a/comfy/diffusers_convert.py +++ b/comfy/diffusers_convert.py @@ -2,10 +2,6 @@ import json import os import yaml -# because of local import nonsense -import sys -sys.path.append(os.path.dirname(os.path.realpath(__file__))) - import folder_paths from comfy.ldm.util import instantiate_from_config from comfy.sd import ModelPatcher, load_model_weights, CLIP, VAE diff --git a/nodes.py b/nodes.py index 025e4fcb4..5c3b3a4ee 100644 --- a/nodes.py +++ b/nodes.py @@ -10,11 +10,11 @@ from PIL import Image from PIL.PngImagePlugin import PngInfo import numpy as np -from comfy.diffusers_convert import load_diffusers sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy")) +import comfy.diffusers_convert import comfy.samplers import comfy.sd import comfy.utils @@ -241,7 +241,7 @@ class DiffusersLoader: model_path = os.path.join(search_path, model_path) break - return load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return comfy.diffusers_convert.load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings")) class unCLIPCheckpointLoader: From 07e9a6b8266326c5f51ea7b0cc20c6129bf5d238 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 15:11:00 -0400 Subject: [PATCH 28/42] Update litegraph from upstream. --- web/lib/litegraph.core.js | 321 +++++++++++++++++++------------------- 1 file changed, 164 insertions(+), 157 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 862d59067..066f51938 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -89,6 +89,7 @@ NO_TITLE: 1, TRANSPARENT_TITLE: 2, AUTOHIDE_TITLE: 3, + VERTICAL_LAYOUT: "vertical", // arrange nodes vertically proxy: null, //used to redirect calls node_images_path: "", @@ -125,14 +126,14 @@ registered_slot_out_types: {}, // slot types for nodeclass slot_types_in: [], // slot types IN slot_types_out: [], // slot types OUT - slot_types_default_in: [], // specify for each IN slot type a(/many) deafult node(s), use single string, array, or object (with node, title, parameters, ..) like for search - slot_types_default_out: [], // specify for each OUT slot type a(/many) deafult node(s), use single string, array, or object (with node, title, parameters, ..) like for search + slot_types_default_in: [], // specify for each IN slot type a(/many) default node(s), use single string, array, or object (with node, title, parameters, ..) like for search + slot_types_default_out: [], // specify for each OUT slot type a(/many) default node(s), use single string, array, or object (with node, title, parameters, ..) like for search alt_drag_do_clone_nodes: false, // [true!] very handy, ALT click to clone and drag the new node do_add_triggers_slots: false, // [true!] will create and connect event slots when using action/events connections, !WILL CHANGE node mode when using onTrigger (enable mode colors), onExecuted does not need this - allow_multi_output_for_events: true, // [false!] being events, it is strongly reccomended to use them sequentually, one by one + allow_multi_output_for_events: true, // [false!] 
being events, it is strongly reccomended to use them sequentially, one by one middle_click_slot_add_default_node: false, //[true!] allows to create and connect a ndoe clicking with the third button (wheel) @@ -158,80 +159,67 @@ console.log("Node registered: " + type); } - var categories = type.split("/"); - var classname = base_class.name; + const classname = base_class.name; - var pos = type.lastIndexOf("/"); - base_class.category = type.substr(0, pos); + const pos = type.lastIndexOf("/"); + base_class.category = type.substring(0, pos); if (!base_class.title) { base_class.title = classname; } - //info.name = name.substr(pos+1,name.length - pos); //extend class - if (base_class.prototype) { - //is a class - for (var i in LGraphNode.prototype) { - if (!base_class.prototype[i]) { - base_class.prototype[i] = LGraphNode.prototype[i]; - } + for (var i in LGraphNode.prototype) { + if (!base_class.prototype[i]) { + base_class.prototype[i] = LGraphNode.prototype[i]; } } - var prev = this.registered_node_types[type]; - if(prev) - console.log("replacing node type: " + type); - else - { - if( !Object.hasOwnProperty( base_class.prototype, "shape") ) - Object.defineProperty(base_class.prototype, "shape", { - set: function(v) { - switch (v) { - case "default": - delete this._shape; - break; - case "box": - this._shape = LiteGraph.BOX_SHAPE; - break; - case "round": - this._shape = LiteGraph.ROUND_SHAPE; - break; - case "circle": - this._shape = LiteGraph.CIRCLE_SHAPE; - break; - case "card": - this._shape = LiteGraph.CARD_SHAPE; - break; - default: - this._shape = v; - } - }, - get: function(v) { - return this._shape; - }, - enumerable: true, - configurable: true - }); + const prev = this.registered_node_types[type]; + if(prev) { + console.log("replacing node type: " + type); + } + if( !Object.prototype.hasOwnProperty.call( base_class.prototype, "shape") ) { + Object.defineProperty(base_class.prototype, "shape", { + set: function(v) { + switch (v) { + case "default": + delete this._shape; + break; + case "box": + this._shape = LiteGraph.BOX_SHAPE; + break; + case "round": + this._shape = LiteGraph.ROUND_SHAPE; + break; + case "circle": + this._shape = LiteGraph.CIRCLE_SHAPE; + break; + case "card": + this._shape = LiteGraph.CARD_SHAPE; + break; + default: + this._shape = v; + } + }, + get: function() { + return this._shape; + }, + enumerable: true, + configurable: true + }); + - //warnings - if (base_class.prototype.onPropertyChange) { - console.warn( - "LiteGraph node class " + - type + - " has onPropertyChange method, it must be called onPropertyChanged with d at the end" - ); - } - - //used to know which nodes create when dragging files to the canvas - if (base_class.supported_extensions) { - for (var i in base_class.supported_extensions) { - var ext = base_class.supported_extensions[i]; - if(ext && ext.constructor === String) - this.node_types_by_file_extension[ ext.toLowerCase() ] = base_class; - } - } - } + //used to know which nodes to create when dragging files to the canvas + if (base_class.supported_extensions) { + for (let i in base_class.supported_extensions) { + const ext = base_class.supported_extensions[i]; + if(ext && ext.constructor === String) { + this.node_types_by_file_extension[ ext.toLowerCase() ] = base_class; + } + } + } + } this.registered_node_types[type] = base_class; if (base_class.constructor.name) { @@ -252,19 +240,11 @@ " has onPropertyChange method, it must be called onPropertyChanged with d at the end" ); } - - //used to know which nodes create when dragging files to the 
canvas - if (base_class.supported_extensions) { - for (var i=0; i < base_class.supported_extensions.length; i++) { - var ext = base_class.supported_extensions[i]; - if(ext && ext.constructor === String) - this.node_types_by_file_extension[ ext.toLowerCase() ] = base_class; - } - } - // TODO one would want to know input and ouput :: this would allow trought registerNodeAndSlotType to get all the slots types - //console.debug("Registering "+type); - if (this.auto_load_slot_types) nodeTmp = new base_class(base_class.title || "tmpnode"); + // TODO one would want to know input and ouput :: this would allow through registerNodeAndSlotType to get all the slots types + if (this.auto_load_slot_types) { + new base_class(base_class.title || "tmpnode"); + } }, /** @@ -1260,37 +1240,39 @@ * Positions every node in a more readable manner * @method arrange */ - LGraph.prototype.arrange = function(margin) { + LGraph.prototype.arrange = function (margin, layout) { margin = margin || 100; - var nodes = this.computeExecutionOrder(false, true); - var columns = []; - for (var i = 0; i < nodes.length; ++i) { - var node = nodes[i]; - var col = node._level || 1; + const nodes = this.computeExecutionOrder(false, true); + const columns = []; + for (let i = 0; i < nodes.length; ++i) { + const node = nodes[i]; + const col = node._level || 1; if (!columns[col]) { columns[col] = []; } columns[col].push(node); } - var x = margin; + let x = margin; - for (var i = 0; i < columns.length; ++i) { - var column = columns[i]; + for (let i = 0; i < columns.length; ++i) { + const column = columns[i]; if (!column) { continue; } - var max_size = 100; - var y = margin + LiteGraph.NODE_TITLE_HEIGHT; - for (var j = 0; j < column.length; ++j) { - var node = column[j]; - node.pos[0] = x; - node.pos[1] = y; - if (node.size[0] > max_size) { - max_size = node.size[0]; + let max_size = 100; + let y = margin + LiteGraph.NODE_TITLE_HEIGHT; + for (let j = 0; j < column.length; ++j) { + const node = column[j]; + node.pos[0] = (layout == LiteGraph.VERTICAL_LAYOUT) ? y : x; + node.pos[1] = (layout == LiteGraph.VERTICAL_LAYOUT) ? x : y; + const max_size_index = (layout == LiteGraph.VERTICAL_LAYOUT) ? 1 : 0; + if (node.size[max_size_index] > max_size) { + max_size = node.size[max_size_index]; } - y += node.size[1] + margin + LiteGraph.NODE_TITLE_HEIGHT; + const node_size_index = (layout == LiteGraph.VERTICAL_LAYOUT) ? 0 : 1; + y += node.size[node_size_index] + margin + LiteGraph.NODE_TITLE_HEIGHT; } x += max_size + margin; } @@ -2468,43 +2450,34 @@ this.title = this.constructor.title; } - if (this.onConnectionsChange) { - if (this.inputs) { - for (var i = 0; i < this.inputs.length; ++i) { - var input = this.inputs[i]; - var link_info = this.graph - ? this.graph.links[input.link] - : null; - this.onConnectionsChange( - LiteGraph.INPUT, - i, - true, - link_info, - input - ); //link_info has been created now, so its updated - } - } + if (this.inputs) { + for (var i = 0; i < this.inputs.length; ++i) { + var input = this.inputs[i]; + var link_info = this.graph ? this.graph.links[input.link] : null; + if (this.onConnectionsChange) + this.onConnectionsChange( LiteGraph.INPUT, i, true, link_info, input ); //link_info has been created now, so its updated - if (this.outputs) { - for (var i = 0; i < this.outputs.length; ++i) { - var output = this.outputs[i]; - if (!output.links) { - continue; - } - for (var j = 0; j < output.links.length; ++j) { - var link_info = this.graph - ? 
this.graph.links[output.links[j]] - : null; - this.onConnectionsChange( - LiteGraph.OUTPUT, - i, - true, - link_info, - output - ); //link_info has been created now, so its updated - } - } - } + if( this.onInputAdded ) + this.onInputAdded(input); + + } + } + + if (this.outputs) { + for (var i = 0; i < this.outputs.length; ++i) { + var output = this.outputs[i]; + if (!output.links) { + continue; + } + for (var j = 0; j < output.links.length; ++j) { + var link_info = this.graph ? this.graph.links[output.links[j]] : null; + if (this.onConnectionsChange) + this.onConnectionsChange( LiteGraph.OUTPUT, i, true, link_info, output ); //link_info has been created now, so its updated + } + + if( this.onOutputAdded ) + this.onOutputAdded(output); + } } if( this.widgets ) @@ -3200,6 +3173,15 @@ return; } + if(slot == null) + { + console.error("slot must be a number"); + return; + } + + if(slot.constructor !== Number) + console.warn("slot must be a number, use node.trigger('name') if you want to use a string"); + var output = this.outputs[slot]; if (!output) { return; @@ -3346,26 +3328,26 @@ * @param {Object} extra_info this can be used to have special properties of an output (label, special color, position, etc) */ LGraphNode.prototype.addOutput = function(name, type, extra_info) { - var o = { name: name, type: type, links: null }; + var output = { name: name, type: type, links: null }; if (extra_info) { for (var i in extra_info) { - o[i] = extra_info[i]; + output[i] = extra_info[i]; } } if (!this.outputs) { this.outputs = []; } - this.outputs.push(o); + this.outputs.push(output); if (this.onOutputAdded) { - this.onOutputAdded(o); + this.onOutputAdded(output); } if (LiteGraph.auto_load_slot_types) LiteGraph.registerNodeAndSlotType(this,type,true); this.setSize( this.computeSize() ); this.setDirtyCanvas(true, true); - return o; + return output; }; /** @@ -3437,10 +3419,10 @@ */ LGraphNode.prototype.addInput = function(name, type, extra_info) { type = type || 0; - var o = { name: name, type: type, link: null }; + var input = { name: name, type: type, link: null }; if (extra_info) { for (var i in extra_info) { - o[i] = extra_info[i]; + input[i] = extra_info[i]; } } @@ -3448,17 +3430,17 @@ this.inputs = []; } - this.inputs.push(o); + this.inputs.push(input); this.setSize( this.computeSize() ); if (this.onInputAdded) { - this.onInputAdded(o); + this.onInputAdded(input); } LiteGraph.registerNodeAndSlotType(this,type); this.setDirtyCanvas(true, true); - return o; + return input; }; /** @@ -5210,6 +5192,7 @@ LGraphNode.prototype.executeAction = function(action) this.allow_dragcanvas = true; this.allow_dragnodes = true; this.allow_interaction = true; //allow to control widgets, buttons, collapse, etc + this.multi_select = false; //allow selecting multi nodes without pressing extra keys this.allow_searchbox = true; this.allow_reconnect_links = true; //allows to change a connection with having to redo it again this.align_to_grid = false; //snap to grid @@ -5435,7 +5418,7 @@ LGraphNode.prototype.executeAction = function(action) }; /** - * returns the visualy active graph (in case there are more in the stack) + * returns the visually active graph (in case there are more in the stack) * @method getCurrentGraph * @return {LGraph} the active graph */ @@ -6060,9 +6043,13 @@ LGraphNode.prototype.executeAction = function(action) this.graph.beforeChange(); this.node_dragged = node; } - if (!this.selected_nodes[node.id]) { - this.processNodeSelected(node, e); - } + this.processNodeSelected(node, e); + } else { // 
double-click + /** + * Don't call the function if the block is already selected. + * Otherwise, it could cause the block to be unselected while its panel is open. + */ + if (!node.is_selected) this.processNodeSelected(node, e); } this.dirty_canvas = true; @@ -6474,6 +6461,10 @@ LGraphNode.prototype.executeAction = function(action) var n = this.selected_nodes[i]; n.pos[0] += delta[0] / this.ds.scale; n.pos[1] += delta[1] / this.ds.scale; + if (!n.is_selected) this.processNodeSelected(n, e); /* + * Don't call the function if the block is already selected. + * Otherwise, it could cause the block to be unselected while dragging. + */ } this.dirty_canvas = true; @@ -7287,7 +7278,7 @@ LGraphNode.prototype.executeAction = function(action) }; LGraphCanvas.prototype.processNodeSelected = function(node, e) { - this.selectNode(node, e && (e.shiftKey||e.ctrlKey)); + this.selectNode(node, e && (e.shiftKey || e.ctrlKey || this.multi_select)); if (this.onNodeSelected) { this.onNodeSelected(node); } @@ -7323,6 +7314,7 @@ LGraphNode.prototype.executeAction = function(action) for (var i in nodes) { var node = nodes[i]; if (node.is_selected) { + this.deselectNode(node); continue; } @@ -7489,8 +7481,8 @@ LGraphNode.prototype.executeAction = function(action) clientY_rel = e.clientY; } - e.deltaX = clientX_rel - this.last_mouse_position[0]; - e.deltaY = clientY_rel- this.last_mouse_position[1]; + // e.deltaX = clientX_rel - this.last_mouse_position[0]; + // e.deltaY = clientY_rel- this.last_mouse_position[1]; this.last_mouse_position[0] = clientX_rel; this.last_mouse_position[1] = clientY_rel; @@ -9742,13 +9734,17 @@ LGraphNode.prototype.executeAction = function(action) ctx.fillRect(margin, y, widget_width - margin * 2, H); var range = w.options.max - w.options.min; var nvalue = (w.value - w.options.min) / range; - ctx.fillStyle = active_widget == w ? "#89A" : "#678"; + if(nvalue < 0.0) nvalue = 0.0; + if(nvalue > 1.0) nvalue = 1.0; + ctx.fillStyle = w.options.hasOwnProperty("slider_color") ? w.options.slider_color : (active_widget == w ? "#89A" : "#678"); ctx.fillRect(margin, y, nvalue * (widget_width - margin * 2), H); if(show_text && !w.disabled) ctx.strokeRect(margin, y, widget_width - margin * 2, H); if (w.marker) { var marker_nvalue = (w.marker - w.options.min) / range; - ctx.fillStyle = "#AA9"; + if(marker_nvalue < 0.0) marker_nvalue = 0.0; + if(marker_nvalue > 1.0) marker_nvalue = 1.0; + ctx.fillStyle = w.options.hasOwnProperty("marker_color") ? w.options.marker_color : "#AA9"; ctx.fillRect( margin + marker_nvalue * (widget_width - margin * 2), y, 2, H ); } if (show_text) { @@ -9915,6 +9911,7 @@ LGraphNode.prototype.executeAction = function(action) case "slider": var range = w.options.max - w.options.min; var nvalue = Math.clamp((x - 15) / (widget_width - 30), 0, 1); + if(w.options.read_only) break; w.value = w.options.min + (w.options.max - w.options.min) * nvalue; if (w.callback) { setTimeout(function() { @@ -9927,7 +9924,8 @@ LGraphNode.prototype.executeAction = function(action) case "combo": var old_value = w.value; if (event.type == LiteGraph.pointerevents_method+"move" && w.type == "number") { - w.value += event.deltaX * 0.1 * (w.options.step || 1); + if(event.deltaX) + w.value += event.deltaX * 0.1 * (w.options.step || 1); if ( w.options.min != null && w.value < w.options.min ) { w.value = w.options.min; } @@ -9994,6 +9992,12 @@ LGraphNode.prototype.executeAction = function(action) var delta = x < 40 ? -1 : x > widget_width - 40 ? 
1 : 0; if (event.click_time < 200 && delta == 0) { this.prompt("Value",w.value,function(v) { + // check if v is a valid equation or a number + if (/^[0-9+\-*/()\s]+$/.test(v)) { + try {//solve the equation if possible + v = eval(v); + } catch (e) { } + } this.value = Number(v); inner_value_change(this, this.value); }.bind(w), @@ -10022,7 +10026,6 @@ LGraphNode.prototype.executeAction = function(action) case "text": if (event.type == LiteGraph.pointerevents_method+"down") { this.prompt("Value",w.value,function(v) { - this.value = v; inner_value_change(this, v); }.bind(w), event,w.options ? w.options.multiline : false ); @@ -10047,6 +10050,9 @@ LGraphNode.prototype.executeAction = function(action) }//end for function inner_value_change(widget, value) { + if(widget.type == "number"){ + value = Number(value); + } widget.value = value; if ( widget.options && widget.options.property && node.properties[widget.options.property] !== undefined ) { node.setProperty( widget.options.property, value ); @@ -11165,7 +11171,7 @@ LGraphNode.prototype.executeAction = function(action) LGraphCanvas.search_limit = -1; LGraphCanvas.prototype.showSearchBox = function(event, options) { // proposed defaults - def_options = { slot_from: null + var def_options = { slot_from: null ,node_from: null ,node_to: null ,do_type_filter: LiteGraph.search_filter_enabled // TODO check for registered_slot_[in/out]_types not empty // this will be checked for functionality enabled : filter on slot type, in and out @@ -11863,7 +11869,7 @@ LGraphNode.prototype.executeAction = function(action) // TODO refactor, theer are different dialog, some uses createDialog, some dont LGraphCanvas.prototype.createDialog = function(html, options) { - def_options = { checkForInput: false, closeOnLeave: true, closeOnLeave_checkModified: true }; + var def_options = { checkForInput: false, closeOnLeave: true, closeOnLeave_checkModified: true }; options = Object.assign(def_options, options || {}); var dialog = document.createElement("div"); @@ -11993,7 +11999,8 @@ LGraphNode.prototype.executeAction = function(action) if (root.onClose && typeof root.onClose == "function"){ root.onClose(); } - root.parentNode.removeChild(root); + if(root.parentNode) + root.parentNode.removeChild(root); /* XXX CHECK THIS */ if(this.parentNode){ this.parentNode.removeChild(this); @@ -12285,7 +12292,7 @@ LGraphNode.prototype.executeAction = function(action) var ref_window = this.getCanvasWindow(); var that = this; var graphcanvas = this; - panel = this.createPanel(node.title || "",{ + var panel = this.createPanel(node.title || "",{ closable: true ,window: ref_window ,onOpen: function(){ From 7e254d2f690ddd49d4d164b56be0e534a1fd2a2d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 15:52:56 -0400 Subject: [PATCH 29/42] Clarify what --windows-standalone-build does. 
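
Only the help string changes here; the behavior lives wherever the flag is checked at startup. argparse exposes the option as args.windows_standalone_build (dashes become underscores), so the consuming side follows a pattern like this hypothetical sketch (maybe_open_page is illustrative, not the actual main.py hook):

import webbrowser

from comfy.cli_args import args

def maybe_open_page(address, port):
    # Auto-open the UI in the default browser only for the standalone Windows build.
    if args.windows_standalone_build:
        webbrowser.open("http://{}:{}".format(address, port))
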
--- comfy/cli_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 739891f71..b24054ce0 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -26,6 +26,6 @@ vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for e parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") -parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.") +parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).") args = parser.parse_args() From f4e359cce1fe0e929bde617083a414961dd871b3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 16:26:06 -0400 Subject: [PATCH 30/42] Set title of page to ComfyUI. --- web/index.html | 1 + 1 file changed, 1 insertion(+) diff --git a/web/index.html b/web/index.html index 86156a7f8..bb79433ce 100644 --- a/web/index.html +++ b/web/index.html @@ -2,6 +2,7 @@ + ComfyUI From 2d7ad41142a941f7d82e166869bb1e8372a559ac Mon Sep 17 00:00:00 2001 From: EllangoK Date: Fri, 7 Apr 2023 19:42:03 -0400 Subject: [PATCH 31/42] colorPalette sets LiteGraph colors --- web/extensions/core/colorPalette.js | 34 +++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index e54bc2a38..8e542b5c2 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -21,8 +21,31 @@ const colorPalettes = { "MODEL": "#B39DDB", // light lavender-purple "STYLE_MODEL": "#C2FFAE", // light green-yellow "VAE": "#FF6E6E", // bright red - } - } + }, + "litegraph_base": { + "NODE_TITLE_COLOR": "#999", + "NODE_SELECTED_TITLE_COLOR": "#FFF", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#AAA", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#333", + "NODE_DEFAULT_BGCOLOR": "#353535", + "NODE_DEFAULT_BOXCOLOR": "#666", + "NODE_DEFAULT_SHAPE": "box", + "NODE_BOX_OUTLINE_COLOR": "#FFF", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 24, + + "WIDGET_BGCOLOR": "#222", + "WIDGET_OUTLINE_COLOR": "#666", + "WIDGET_TEXT_COLOR": "#DDD", + "WIDGET_SECONDARY_TEXT_COLOR": "#999", + + "LINK_COLOR": "#9A9", + "EVENT_LINK_COLOR": "#A86", + "CONNECTING_LINK_COLOR": "#AFA", + }, + }, }, "palette_2": { "id": "palette_2", @@ -194,6 +217,13 @@ app.registerExtension({ Object.assign(app.canvas.default_connection_color_byType, colorPalette.colors.node_slot); app.canvas.draw(true, true); } + if (colorPalette.colors.litegraph_base) { + for (const key in colorPalette.colors.litegraph_base) { + if (colorPalette.colors.litegraph_base.hasOwnProperty(key) && LiteGraph.hasOwnProperty(key)) { + LiteGraph[key] = colorPalette.colors.litegraph_base[key]; + } + } + } } }; From 7bce83aa0394a01c6624e7d1a5763653e52e6eb6 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Fri, 7 Apr 2023 20:12:24 -0400 Subject: [PATCH 32/42] fixes NODE_TITLE_COLOR not being set --- web/extensions/core/colorPalette.js | 60 +++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 17 deletions(-) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index 8e542b5c2..d212602c6 100644 --- a/web/extensions/core/colorPalette.js 
+++ b/web/extensions/core/colorPalette.js @@ -47,25 +47,48 @@ const colorPalettes = { }, }, }, - "palette_2": { - "id": "palette_2", - "name": "Palette 2", + "solarized": { + "id": "solarized", + "name": "Solarized", "colors": { "node_slot": { - "CLIP": "#556B2F", // Dark Olive Green - "CLIP_VISION": "#4B0082", // Indigo - "CLIP_VISION_OUTPUT": "#006400", // Green - "CONDITIONING": "#FF1493", // Deep Pink - "CONTROL_NET": "#8B4513", // Saddle Brown - "IMAGE": "#8B0000", // Dark Red - "LATENT": "#00008B", // Dark Blue - "MASK": "#2F4F4F", // Dark Slate Grey - "MODEL": "#FF8C00", // Dark Orange - "STYLE_MODEL": "#004A4A", // Sherpa Blue - "UPSCALE_MODEL": "#4A004A", // Tyrian Purple - "VAE": "#4F394F", // Loulou - } - } + "CLIP": "#859900", // Green + "CLIP_VISION": "#6c71c4", // Indigo + "CLIP_VISION_OUTPUT": "#859900", // Green + "CONDITIONING": "#d33682", // Magenta + "CONTROL_NET": "#cb4b16", // Orange + "IMAGE": "#dc322f", // Red + "LATENT": "#268bd2", // Blue + "MASK": "#073642", // Base02 + "MODEL": "#cb4b16", // Orange + "STYLE_MODEL": "#073642", // Base02 + "UPSCALE_MODEL": "#6c71c4", // Indigo + "VAE": "#586e75", // Base1 + }, + "litegraph_base": { + "NODE_TITLE_COLOR": "#fdf6e3", + "NODE_SELECTED_TITLE_COLOR": "#b58900", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#657b83", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#586e75", + "NODE_DEFAULT_BGCOLOR": "#073642", + "NODE_DEFAULT_BOXCOLOR": "#839496", + "NODE_DEFAULT_SHAPE": "box", + "NODE_BOX_OUTLINE_COLOR": "#fdf6e3", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 24, + + "WIDGET_BGCOLOR": "#002b36", + "WIDGET_OUTLINE_COLOR": "#839496", + "WIDGET_TEXT_COLOR": "#fdf6e3", + "WIDGET_SECONDARY_TEXT_COLOR": "#93a1a1", + + "LINK_COLOR": "#2aa198", + "EVENT_LINK_COLOR": "#268bd2", + "CONNECTING_LINK_COLOR": "#859900", + }, + }, } }; @@ -218,6 +241,9 @@ app.registerExtension({ app.canvas.draw(true, true); } if (colorPalette.colors.litegraph_base) { + // Everything updates correctly in the loop, except the Node Title for some reason + app.canvas.node_title_color = colorPalette.colors.litegraph_base.NODE_TITLE_COLOR; + for (const key in colorPalette.colors.litegraph_base) { if (colorPalette.colors.litegraph_base.hasOwnProperty(key) && LiteGraph.hasOwnProperty(key)) { LiteGraph[key] = colorPalette.colors.litegraph_base[key]; From 463792f06826227c711a5b2e729cafbe5b8e6ce8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 7 Apr 2023 23:07:19 -0400 Subject: [PATCH 33/42] Allow dragging again. --- web/lib/litegraph.core.js | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 066f51938..c3efa22a9 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -7481,8 +7481,8 @@ LGraphNode.prototype.executeAction = function(action) clientY_rel = e.clientY; } - // e.deltaX = clientX_rel - this.last_mouse_position[0]; - // e.deltaY = clientY_rel- this.last_mouse_position[1]; + e.deltaX = clientX_rel - this.last_mouse_position[0]; + e.deltaY = clientY_rel- this.last_mouse_position[1]; this.last_mouse_position[0] = clientX_rel; this.last_mouse_position[1] = clientY_rel; @@ -9923,7 +9923,14 @@ LGraphNode.prototype.executeAction = function(action) case "number": case "combo": var old_value = w.value; - if (event.type == LiteGraph.pointerevents_method+"move" && w.type == "number") { + var delta = x < 40 ? -1 : x > widget_width - 40 ? 
1 : 0; + var allow_scroll = true; + if (delta) { + if (x > -3 && x < widget_width + 3) { + allow_scroll = false; + } + } + if (allow_scroll && event.type == LiteGraph.pointerevents_method+"move" && w.type == "number") { if(event.deltaX) w.value += event.deltaX * 0.1 * (w.options.step || 1); if ( w.options.min != null && w.value < w.options.min ) { w.value = w.options.min; } From ebd7f9bf80213a44a8e2cadc75875a4b980991e5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 8 Apr 2023 01:10:15 -0400 Subject: [PATCH 34/42] Change the default prompt to something more impressive. --- web/scripts/defaultGraph.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/scripts/defaultGraph.js b/web/scripts/defaultGraph.js index 967377ad6..9b3cb4a7e 100644 --- a/web/scripts/defaultGraph.js +++ b/web/scripts/defaultGraph.js @@ -13,7 +13,7 @@ export const defaultGraph = { inputs: [{ name: "clip", type: "CLIP", link: 5 }], outputs: [{ name: "CONDITIONING", type: "CONDITIONING", links: [6], slot_index: 0 }], properties: {}, - widgets_values: ["bad hands"], + widgets_values: ["text, watermark"], }, { id: 6, @@ -26,7 +26,7 @@ export const defaultGraph = { inputs: [{ name: "clip", type: "CLIP", link: 3 }], outputs: [{ name: "CONDITIONING", type: "CONDITIONING", links: [4], slot_index: 0 }], properties: {}, - widgets_values: ["masterpiece best quality girl"], + widgets_values: ["beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"], }, { id: 5, @@ -56,7 +56,7 @@ export const defaultGraph = { ], outputs: [{ name: "LATENT", type: "LATENT", links: [7], slot_index: 0 }], properties: {}, - widgets_values: [8566257, true, 20, 8, "euler", "normal", 1], + widgets_values: [156680208700286, true, 20, 8, "euler", "normal", 1], }, { id: 8, From 5bb1358dd66a310793a1ea1a8cfcb037618fe37e Mon Sep 17 00:00:00 2001 From: EllangoK Date: Sat, 8 Apr 2023 09:28:35 -0400 Subject: [PATCH 35/42] manually set default link color --- web/extensions/core/colorPalette.js | 3 +- web/scripts/test.js | 71 +++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 web/scripts/test.js diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index d212602c6..30a984607 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -238,11 +238,11 @@ app.registerExtension({ if (colorPalette.colors) { if (colorPalette.colors.node_slot) { Object.assign(app.canvas.default_connection_color_byType, colorPalette.colors.node_slot); - app.canvas.draw(true, true); } if (colorPalette.colors.litegraph_base) { // Everything updates correctly in the loop, except the Node Title for some reason app.canvas.node_title_color = colorPalette.colors.litegraph_base.NODE_TITLE_COLOR; + app.canvas.default_link_color = colorPalette.colors.litegraph_base.LINK_COLOR; for (const key in colorPalette.colors.litegraph_base) { if (colorPalette.colors.litegraph_base.hasOwnProperty(key) && LiteGraph.hasOwnProperty(key)) { LiteGraph[key] = colorPalette.colors.litegraph_base[key]; } } } + app.canvas.draw(true, true); } }; diff --git a/web/scripts/test.js b/web/scripts/test.js new file mode 100644 index 000000000..55f8dfb69 --- /dev/null +++ b/web/scripts/test.js @@ -0,0 +1,71 @@ +(function() { + var LGraphCanvas = LiteGraph.LGraphCanvas; + var LGraph = LiteGraph.LGraph; + + // Save the original renderLink function + var originalRenderLink = LGraphCanvas.prototype.renderLink; + + // Save the original connect function + var originalConnect = LGraph.prototype.connect; + + // 
Override the connect function + LGraph.prototype.connect = function ( + origin_slot, + target_slot, + options + ) { + var origin_id = origin_slot[0]; + var target_id = target_slot[0]; + + var origin_node = this.getNodeById(origin_id); + var target_node = this.getNodeById(target_id); + + + if (origin_node && target_node) { + var output_slot = origin_slot[1]; + var output_slot_info = origin_node.getOutputInfo(output_slot); + + + console.log(output_slot_info) + if (output_slot_info) { + options = options || {}; + options.color = output_slot_info.label_color || null; + } + } + + return originalConnect.call(this, origin_slot, target_slot, options); + }; + + // Override the renderLink function + LGraphCanvas.prototype.renderLink = function ( + ctx, + a, + b, + link, + skip_border, + flow, + color, + start_dir, + end_dir, + num_sublines + ) { + if (link && link.color) { + color = link.color; + } + + // Call the original renderLink function with the new color + originalRenderLink.call( + this, + ctx, + a, + b, + link, + skip_border, + flow, + color, + start_dir, + end_dir, + num_sublines + ); + }; +})(); From 620c0f937665a1001f61195a666565bfab28103a Mon Sep 17 00:00:00 2001 From: EllangoK Date: Sat, 8 Apr 2023 10:43:04 -0400 Subject: [PATCH 36/42] link color is set to nodeType color --- web/extensions/core/colorPalette.js | 59 ++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index 30a984607..3c0300bf0 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -240,7 +240,7 @@ app.registerExtension({ Object.assign(app.canvas.default_connection_color_byType, colorPalette.colors.node_slot); } if (colorPalette.colors.litegraph_base) { - // Everything updates correctly in the loop, except the Node Title for some reason + // Everything updates correctly in the loop, except the Node Title and Link Color for some reason app.canvas.node_title_color = colorPalette.colors.litegraph_base.NODE_TITLE_COLOR; app.canvas.default_link_color = colorPalette.colors.litegraph_base.LINK_COLOR; @@ -250,6 +250,63 @@ app.registerExtension({ } } } + + (function(colorPalette) { + var LGraphCanvas = LiteGraph.LGraphCanvas; + var LGraph = LiteGraph.LGraph; + + // Save the original renderLink function + var originalRenderLink = LGraphCanvas.prototype.renderLink; + + // Override the renderLink function + LGraphCanvas.prototype.renderLink = function ( + ctx, + a, + b, + link, + skip_border, + flow, + color, + start_dir, + end_dir, + num_sublines + ) { + if (link && link.color) { + color = link.color; + } + if (link) { + const inputNode = this.graph.getNodeById(link.origin_id); + const outputNode = this.graph.getNodeById(link.target_id); + + let data = null; + if (inputNode.outputs.length > 1) { + data = outputNode.inputs[0]; + } else { + data = inputNode.outputs[0]; + } + let nodeType = data.type; + color = "#" + Math.floor(Math.random() * 16777215).toString(16); + + color = colorPalette.colors.node_slot[nodeType]; + } + + // Call the original renderLink function with the new color + originalRenderLink.call( + this, + ctx, + a, + b, + link, + skip_border, + flow, + color, + start_dir, + end_dir, + num_sublines + ); + }; + })(colorPalette); + app.canvas.draw(true, true); } }; From 6b638c965b6fec1c88947212792dc299acf67b36 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Sat, 8 Apr 2023 10:47:15 -0400 Subject: [PATCH 37/42] matches entry for correct link color --- 
web/extensions/core/colorPalette.js | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index 3c0300bf0..e84bf6c0e 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -284,7 +284,20 @@ app.registerExtension({ } else { data = inputNode.outputs[0]; } - let nodeType = data.type; + + const matchingEntry = inputNode.outputs.find(output => { + return outputNode.inputs.some(input => input.type === output.type); + }); + + console.log("matchingEntry: ", matchingEntry) + + const inputTypes = inputNode.outputs.map(output => output.type); + console.log("Input types:", inputTypes); + + const outputTypes = outputNode.inputs.map(input => input.type); + console.log("Output types:", outputTypes); + + let nodeType = matchingEntry.type; color = "#" + Math.floor(Math.random() * 16777215).toString(16); color = colorPalette.colors.node_slot[nodeType]; From 0d358b95665a4a2c3edbdf2a4e51920e4c773fb1 Mon Sep 17 00:00:00 2001 From: EllangoK Date: Sat, 8 Apr 2023 10:59:34 -0400 Subject: [PATCH 38/42] cleanup customizeRenderLink --- web/extensions/core/colorPalette.js | 127 +++++++++++++--------------- 1 file changed, 57 insertions(+), 70 deletions(-) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index e84bf6c0e..2189e3c29 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -250,76 +250,7 @@ app.registerExtension({ } } } - - (function(colorPalette) { - var LGraphCanvas = LiteGraph.LGraphCanvas; - var LGraph = LiteGraph.LGraph; - - // Save the original renderLink function - var originalRenderLink = LGraphCanvas.prototype.renderLink; - - // Override the renderLink function - LGraphCanvas.prototype.renderLink = function ( - ctx, - a, - b, - link, - skip_border, - flow, - color, - start_dir, - end_dir, - num_sublines - ) { - if (link && link.color) { - color = link.color; - } - if (link) { - const inputNode = this.graph.getNodeById(link.origin_id); - const outputNode = this.graph.getNodeById(link.target_id); - - let data = null; - if (inputNode.outputs.length > 1) { - data = outputNode.inputs[0]; - } else { - data = inputNode.outputs[0]; - } - - const matchingEntry = inputNode.outputs.find(output => { - return outputNode.inputs.some(input => input.type === output.type); - }); - - console.log("matchingEntry: ", matchingEntry) - - const inputTypes = inputNode.outputs.map(output => output.type); - console.log("Input types:", inputTypes); - - const outputTypes = outputNode.inputs.map(input => input.type); - console.log("Output types:", outputTypes); - - let nodeType = matchingEntry.type; - color = "#" + Math.floor(Math.random() * 16777215).toString(16); - - color = colorPalette.colors.node_slot[nodeType]; - } - - // Call the original renderLink function with the new color - originalRenderLink.call( - this, - ctx, - a, - b, - link, - skip_border, - flow, - color, - start_dir, - end_dir, - num_sublines - ); - }; - })(colorPalette); - + customizeRenderLink(colorPalette); app.canvas.draw(true, true); } }; @@ -476,3 +407,59 @@ app.registerExtension({ }); }, }); + +function customizeRenderLink(colorPalette) { + var LGraphCanvas = LiteGraph.LGraphCanvas; + + function getLinkColor(link, inputNode, outputNode, colorPalette) { + let color = null; + if (link && link.color) { + color = link.color; + } else if (link) { + const matchingEntry = inputNode.outputs.find((output) => { + return 
outputNode.inputs.some((input) => input.type === output.type);
+      });
+
+      if (matchingEntry) {
+        let nodeType = matchingEntry.type;
+        color = colorPalette.colors.node_slot[nodeType];
+      }
+    }
+    return color;
+  }
+
+  var originalRenderLink = LGraphCanvas.prototype.renderLink;
+
+  LGraphCanvas.prototype.renderLink = function(
+    ctx,
+    a,
+    b,
+    link,
+    skip_border,
+    flow,
+    color,
+    start_dir,
+    end_dir,
+    num_sublines
+  ) {
+    if (link) {
+      const inputNode = this.graph.getNodeById(link.origin_id);
+      const outputNode = this.graph.getNodeById(link.target_id);
+      color = getLinkColor(link, inputNode, outputNode, colorPalette);
+    }
+
+    originalRenderLink.call(
+      this,
+      ctx,
+      a,
+      b,
+      link,
+      skip_border,
+      flow,
+      color,
+      start_dir,
+      end_dir,
+      num_sublines
+    );
+  };
+}
\ No newline at end of file

From 327e49da42a76aa84d86e7236b95e5490e860a37 Mon Sep 17 00:00:00 2001
From: EllangoK
Date: Sat, 8 Apr 2023 11:13:27 -0400
Subject: [PATCH 39/42] remove test render link file

---
 web/extensions/core/colorPalette.js |  2 +-
 web/scripts/test.js                 | 71 -----------------------------
 2 files changed, 1 insertion(+), 72 deletions(-)
 delete mode 100644 web/scripts/test.js

diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js
index 2189e3c29..a04a422b9 100644
--- a/web/extensions/core/colorPalette.js
+++ b/web/extensions/core/colorPalette.js
@@ -462,4 +462,4 @@ function customizeRenderLink(colorPalette) {
       num_sublines
     );
   };
-}
\ No newline at end of file
+}

diff --git a/web/scripts/test.js b/web/scripts/test.js
deleted file mode 100644
index 55f8dfb69..000000000
--- a/web/scripts/test.js
+++ /dev/null
@@ -1,71 +0,0 @@
-(function() {
-    var LGraphCanvas = LiteGraph.LGraphCanvas;
-    var LGraph = LiteGraph.LGraph;
-
-    // Save the original renderLink function
-    var originalRenderLink = LGraphCanvas.prototype.renderLink;
-
-    // Save the original connect function
-    var originalConnect = LGraph.prototype.connect;
-
-    // Override the connect function
-    LGraph.prototype.connect = function (
-        origin_slot,
-        target_slot,
-        options
-    ) {
-        var origin_id = origin_slot[0];
-        var target_id = target_slot[0];
-
-        var origin_node = this.getNodeById(origin_id);
-        var target_node = this.getNodeById(target_id);
-
-
-        if (origin_node && target_node) {
-            var output_slot = origin_slot[1];
-            var output_slot_info = origin_node.getOutputInfo(output_slot);
-
-
-            console.log(output_slot_info)
-            if (output_slot_info) {
-                options = options || {};
-                options.color = output_slot_info.label_color || null;
-            }
-        }
-
-        return originalConnect.call(this, origin_slot, target_slot, options);
-    };
-
-    // Override the renderLink function
-    LGraphCanvas.prototype.renderLink = function (
-        ctx,
-        a,
-        b,
-        link,
-        skip_border,
-        flow,
-        color,
-        start_dir,
-        end_dir,
-        num_sublines
-    ) {
-        if (link && link.color) {
-            color = link.color;
-        }
-
-        // Call the original renderLink function with the new color
-        originalRenderLink.call(
-            this,
-            ctx,
-            a,
-            b,
-            link,
-            skip_border,
-            flow,
-            color,
-            start_dir,
-            end_dir,
-            num_sublines
-        );
-    };
-})();

From d9220a0bd6f60ad06e73a4d631bbc63ec8592c1c Mon Sep 17 00:00:00 2001
From: EllangoK
Date: Sat, 8 Apr 2023 11:55:27 -0400
Subject: [PATCH 40/42] link colors change after palette swap

This didn't work previously because the same function was redefined on
every palette change, stacking renderLink overrides.
---
 web/extensions/core/colorPalette.js | 39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js
index 
a04a422b9..c1473b93f 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -238,6 +238,7 @@ app.registerExtension({ if (colorPalette.colors) { if (colorPalette.colors.node_slot) { Object.assign(app.canvas.default_connection_color_byType, colorPalette.colors.node_slot); + customizeRenderLink(colorPalette); } if (colorPalette.colors.litegraph_base) { // Everything updates correctly in the loop, except the Node Title and Link Color for some reason @@ -250,7 +251,6 @@ app.registerExtension({ } } } - customizeRenderLink(colorPalette); app.canvas.draw(true, true); } }; @@ -411,24 +411,28 @@ app.registerExtension({ function customizeRenderLink(colorPalette) { var LGraphCanvas = LiteGraph.LGraphCanvas; - function getLinkColor(link, inputNode, outputNode, colorPalette) { - let color = null; - if (link && link.color) { - color = link.color; - } else if (link) { - const matchingEntry = inputNode.outputs.find((output) => { - return outputNode.inputs.some((input) => input.type === output.type); - }); + if (!LGraphCanvas.prototype.getLinkColor) { + LGraphCanvas.prototype.getLinkColor = function(link, inputNode, outputNode, colorPalette) { + let color = null; + if (link && link.color) { + color = link.color; + } else if (link) { + const matchingEntry = inputNode.outputs.find((output) => { + return outputNode.inputs.some((input) => input.type === output.type); + }); - if (matchingEntry) { - let nodeType = matchingEntry.type; - color = colorPalette.colors.node_slot[nodeType]; + if (matchingEntry) { + let nodeType = matchingEntry.type; + color = colorPalette.colors.node_slot[nodeType]; + } } - } - return color; + return color; + }; } - var originalRenderLink = LGraphCanvas.prototype.renderLink; + if (!LGraphCanvas.prototype.originalRenderLink) { + LGraphCanvas.prototype.originalRenderLink = LGraphCanvas.prototype.renderLink; + } LGraphCanvas.prototype.renderLink = function( ctx, @@ -445,10 +449,11 @@ function customizeRenderLink(colorPalette) { if (link) { const inputNode = this.graph.getNodeById(link.origin_id); const outputNode = this.graph.getNodeById(link.target_id); - color = getLinkColor(link, inputNode, outputNode, colorPalette); + color = this.getLinkColor(link, inputNode, outputNode, colorPalette); } - originalRenderLink.call( + // call the original renderLink function + this.originalRenderLink.call( this, ctx, a, From 53fba56ee5fa48261d8caec46927c34923a168fd Mon Sep 17 00:00:00 2001 From: EllangoK Date: Sat, 8 Apr 2023 13:18:23 -0400 Subject: [PATCH 41/42] assign link_type_colors directly --- web/extensions/core/colorPalette.js | 63 +---------------------------- 1 file changed, 1 insertion(+), 62 deletions(-) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index c1473b93f..a08d46684 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -238,7 +238,7 @@ app.registerExtension({ if (colorPalette.colors) { if (colorPalette.colors.node_slot) { Object.assign(app.canvas.default_connection_color_byType, colorPalette.colors.node_slot); - customizeRenderLink(colorPalette); + Object.assign(LGraphCanvas.link_type_colors, colorPalette.colors.node_slot); } if (colorPalette.colors.litegraph_base) { // Everything updates correctly in the loop, except the Node Title and Link Color for some reason @@ -407,64 +407,3 @@ app.registerExtension({ }); }, }); - -function customizeRenderLink(colorPalette) { - var LGraphCanvas = LiteGraph.LGraphCanvas; - - if (!LGraphCanvas.prototype.getLinkColor) { - 
LGraphCanvas.prototype.getLinkColor = function(link, inputNode, outputNode, colorPalette) {
-      let color = null;
-      if (link && link.color) {
-        color = link.color;
-      } else if (link) {
-        const matchingEntry = inputNode.outputs.find((output) => {
-          return outputNode.inputs.some((input) => input.type === output.type);
-        });
-
-        if (matchingEntry) {
-          let nodeType = matchingEntry.type;
-          color = colorPalette.colors.node_slot[nodeType];
-        }
-      }
-      return color;
-    };
-  }
-
-  if (!LGraphCanvas.prototype.originalRenderLink) {
-    LGraphCanvas.prototype.originalRenderLink = LGraphCanvas.prototype.renderLink;
-  }
-
-  LGraphCanvas.prototype.renderLink = function(
-    ctx,
-    a,
-    b,
-    link,
-    skip_border,
-    flow,
-    color,
-    start_dir,
-    end_dir,
-    num_sublines
-  ) {
-    if (link) {
-      const inputNode = this.graph.getNodeById(link.origin_id);
-      const outputNode = this.graph.getNodeById(link.target_id);
-      color = this.getLinkColor(link, inputNode, outputNode, colorPalette);
-    }
-
-    // call the original renderLink function
-    this.originalRenderLink.call(
-      this,
-      ctx,
-      a,
-      b,
-      link,
-      skip_border,
-      flow,
-      color,
-      start_dir,
-      end_dir,
-      num_sublines
-    );
-  };
-}

From 24d53992c61650bc2919acc06fbfbb625468bde8 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 8 Apr 2023 15:53:01 -0400
Subject: [PATCH 42/42] Rename checkpoint loader display names.

---
 nodes.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nodes.py b/nodes.py
index 63cf5e448..14a73bcd7 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1109,8 +1109,8 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "KSampler": "KSampler",
     "KSamplerAdvanced": "KSampler (Advanced)",
     # Loaders
-    "CheckpointLoader": "Load Checkpoint",
-    "CheckpointLoaderSimple": "Load Checkpoint (Simple)",
+    "CheckpointLoader": "Load Checkpoint (With Config)",
+    "CheckpointLoaderSimple": "Load Checkpoint",
     "VAELoader": "Load VAE",
     "LoraLoader": "Load LoRA",
     "CLIPLoader": "Load CLIP",
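
Where the series lands: PATCH 41 deletes the customizeRenderLink machinery above in favor of copying the palette's node_slot colors into the two tables LiteGraph already consults when drawing, so a palette swap only has to refresh those maps and the PATCH 40 guards become unnecessary. A condensed sketch of that final mechanism (the applyNodeSlotColors wrapper and its parameters are illustrative; the two Object.assign targets are taken verbatim from the patch, and LGraphCanvas is assumed to be LiteGraph.LGraphCanvas):

    // Refresh LiteGraph's built-in color tables from the active palette.
    function applyNodeSlotColors(app, LGraphCanvas, colorPalette) {
        const slotColors = colorPalette.colors.node_slot; // maps type names to hex colors
        // Slot dots are colored per connection type on the canvas instance...
        Object.assign(app.canvas.default_connection_color_byType, slotColors);
        // ...and links are drawn from the class-level link_type_colors map.
        Object.assign(LGraphCanvas.link_type_colors, slotColors);
    }

PATCH 42 then finishes the display-name pass from earlier in the series: the config-file loader becomes "Load Checkpoint (With Config)" and CheckpointLoaderSimple takes over the plain "Load Checkpoint" title, presumably because it is the variant most workflows use.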