From 5190aa284d501182196672865703a0c659fe20c1 Mon Sep 17 00:00:00 2001
From: melMass
Date: Fri, 21 Jul 2023 13:19:05 +0200
Subject: [PATCH 1/6] =?UTF-8?q?fix:=20=E2=9A=A1=EF=B8=8F=20small=20type=20?=
 =?UTF-8?q?fix?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

getCustomWidgets expects a plain record and not an array of records
---
 web/types/comfy.d.ts | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/web/types/comfy.d.ts b/web/types/comfy.d.ts
index 8444e13a8..f7129b555 100644
--- a/web/types/comfy.d.ts
+++ b/web/types/comfy.d.ts
@@ -30,9 +30,7 @@ export interface ComfyExtension {
 	getCustomWidgets(
 		app: ComfyApp
 	): Promise<
-		Array<
-			Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
-		>
+		Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
 	>;
 	/**
 	 * Allows the extension to add additional handling to the node before it is registered with LGraph

From 7c0a5a3e0ee7afa69db8f6f0d47e409c3f0c43d1 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 25 Jul 2023 00:09:01 -0400
Subject: [PATCH 2/6] Disable cuda malloc on a bunch of quadro cards.

---
 cuda_malloc.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/cuda_malloc.py b/cuda_malloc.py
index 250d827da..a808b2071 100644
--- a/cuda_malloc.py
+++ b/cuda_malloc.py
@@ -37,7 +37,11 @@ def get_gpu_names():
         return set()
 
 def cuda_malloc_supported():
-    blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M", "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745"}
+    blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
+                 "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
+                 "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
+                 "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000"}
+
     try:
         names = get_gpu_names()
     except:

From 4f9b6f39d12082c1ee310b232ef093d1f048af64 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 25 Jul 2023 00:45:20 -0400
Subject: [PATCH 3/6] Fix potential issue with Save Checkpoint.
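
Newer CLIP text encoder checkpoints do not always contain the
"position_ids" buffer, so popping that key unconditionally can raise
KeyError while saving. The change guards the pop so the key is only
removed when present. A minimal sketch of the pattern, using a toy
state dict (the surrounding weight key is made up for illustration):

    # Toy state dict that lacks the position_ids buffer.
    state_dict_g = {"clip_g.transformer.some.weight": 0.0}
    key = "clip_g.transformer.text_model.embeddings.position_ids"
    if key in state_dict_g:
        state_dict_g.pop(key)  # skipped here since the key is absent, so no KeyError
    # state_dict_g.pop(key, None) would be an equivalent one-liner.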
---
 comfy/supported_models.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index 915214081..b1c01fe87 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -126,7 +126,8 @@ class SDXLRefiner(supported_models_base.BASE):
     def process_clip_state_dict_for_saving(self, state_dict):
         replace_prefix = {}
         state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
-        state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
+        if "clip_g.transformer.text_model.embeddings.position_ids" in state_dict_g:
+            state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
         replace_prefix["clip_g"] = "conditioner.embedders.0.model"
         state_dict_g = supported_models_base.state_dict_prefix_replace(state_dict_g, replace_prefix)
         return state_dict_g
@@ -171,7 +172,8 @@ class SDXL(supported_models_base.BASE):
         replace_prefix = {}
         keys_to_replace = {}
         state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
-        state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
+        if "clip_g.transformer.text_model.embeddings.position_ids" in state_dict_g:
+            state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
         for k in state_dict:
             if k.startswith("clip_l"):
                 state_dict_g[k] = state_dict[k]

From 315ba30c81ce73f94b44e74b6c1dbc8951c8720f Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 25 Jul 2023 15:44:49 -0400
Subject: [PATCH 4/6] Update nightly ROCm pytorch command in readme to 5.6

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 5e32a74f3..ad85d3d49 100644
--- a/README.md
+++ b/README.md
@@ -93,8 +93,8 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins
 
 ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```
 
-This is the command to install the nightly with ROCm 5.5 that supports the 7000 series and might have some performance improvements:
-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.5 -r requirements.txt```
+This is the command to install the nightly with ROCm 5.6 that supports the 7000 series and might have some performance improvements:
+```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.6 -r requirements.txt```
 
 ### NVIDIA
 

From 727588d076e45f7130af0b4fe61cb30be5a5328c Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 25 Jul 2023 16:39:15 -0400
Subject: [PATCH 5/6] Fix some new loras.

---
 comfy/sd.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy/sd.py b/comfy/sd.py
index 64ab7cc63..70701ab6b 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -170,6 +170,8 @@ def model_lora_keys_clip(model, key_map={}):
         if k in sdk:
             lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
             key_map[lora_key] = k
+            lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c])
+            key_map[lora_key] = k
 
         k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
         if k in sdk:

From 5e3ac1928aec03009e22a3d79e1ba9b3f16a738e Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 25 Jul 2023 22:02:26 -0400
Subject: [PATCH 6/6] Implement modelspec metadata in CheckpointSave for SDXL
 and refiner.
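
The modelspec keys follow the Stability AI model metadata spec and
are stored in the saved checkpoint's header next to the existing
"prompt" entry. A rough sketch of what the metadata dict ends up
looking like for an SDXL base (epsilon-prediction) checkpoint; the
title value is hypothetical since it is built from the save filename
and counter at runtime:

    # Illustrative only: metadata for a hypothetical SDXL base save.
    metadata = {
        "prompt": "{...}",  # serialized workflow JSON, unchanged
        "modelspec.sai_model_spec": "1.0.0",
        "modelspec.architecture": "stable-diffusion-xl-v1-base",
        "modelspec.implementation": "sgm",
        "modelspec.title": "ComfyUI 1",  # "{filename} {counter}", hypothetical
        "modelspec.predict_key": "epsilon",  # "v" for v-prediction models
    }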
---
 comfy_extras/nodes_model_merging.py | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py
index 95c4cfece..6146c4500 100644
--- a/comfy_extras/nodes_model_merging.py
+++ b/comfy_extras/nodes_model_merging.py
@@ -1,5 +1,7 @@
 import comfy.sd
 import comfy.utils
+import comfy.model_base
+
 import folder_paths
 import json
 import os
@@ -100,6 +102,31 @@ class CheckpointSave:
             prompt_info = json.dumps(prompt)
 
         metadata = {"prompt": prompt_info}
+
+
+        enable_modelspec = True
+        if isinstance(model.model, comfy.model_base.SDXL):
+            metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base"
+        elif isinstance(model.model, comfy.model_base.SDXLRefiner):
+            metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
+        else:
+            enable_modelspec = False
+
+        if enable_modelspec:
+            metadata["modelspec.sai_model_spec"] = "1.0.0"
+            metadata["modelspec.implementation"] = "sgm"
+            metadata["modelspec.title"] = "{} {}".format(filename, counter)
+
+        #TODO:
+        # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512",
+        # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h",
+        # "v2-inpainting"
+
+        if model.model.model_type == comfy.model_base.ModelType.EPS:
+            metadata["modelspec.predict_key"] = "epsilon"
+        elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION:
+            metadata["modelspec.predict_key"] = "v"
+
         if extra_pnginfo is not None:
             for x in extra_pnginfo:
                 metadata[x] = json.dumps(extra_pnginfo[x])