From ea77750759cc213d7c30ebb6d42c026bf23b829c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 7 Sep 2024 02:13:13 -0400 Subject: [PATCH 1/4] Support a generic Comfy format for text encoder loras. This is a format with keys like: text_encoders.clip_l.transformer.text_model.encoder.layers.9.self_attn.v_proj.lora_up.weight Instead of waiting for me to add support for specific lora formats you can convert your text encoder loras to this format instead. If you want to see an example save a text encoder lora with the SaveLora node with the commit right after this one. --- comfy/lora.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index eb95d02ab..ad951bbaf 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -201,6 +201,9 @@ def load_lora(lora, to_load): def model_lora_keys_clip(model, key_map={}): sdk = model.state_dict().keys() + for k in sdk: + if k.endswith(".weight"): + key_map["text_encoders.{}".format(k[:-len(".weight")])] = k #generic lora format without any weird key names text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}" clip_l_present = False From 9bfee687733f43cbc8c441ee579693f27c4d50f6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 7 Sep 2024 02:30:12 -0400 Subject: [PATCH 2/4] LoraSave node now supports generating text encoder loras. text_encoder_diff should be connected to a CLIPMergeSubtract node. model_diff and text_encoder_diff are optional inputs so you can create model only loras, text encoder only loras or a lora that contains both. --- comfy_extras/nodes_lora_extract.py | 45 +++++++++++++++++------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py index dcb46f0e0..76330053f 100644 --- a/comfy_extras/nodes_lora_extract.py +++ b/comfy_extras/nodes_lora_extract.py @@ -38,6 +38,23 @@ def extract_lora(diff, rank): Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1]) return (U, Vh) +def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd): + comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True) + sd = model_diff.model_state_dict(filter_prefix=prefix_model) + + for k in sd: + if k.endswith(".weight"): + weight_diff = sd[k] + if weight_diff.ndim < 2: + continue + try: + out = extract_lora(weight_diff, rank) + output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu() + output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu() + except: + logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) + return output_sd + class LoraSave: def __init__(self): self.output_dir = folder_paths.get_output_directory() @@ -47,7 +64,8 @@ class LoraSave: return {"required": {"filename_prefix": ("STRING", {"default": "loras/ComfyUI_extracted_lora"}), "rank": ("INT", {"default": 8, "min": 1, "max": 1024, "step": 1}), }, - "optional": {"model_diff": ("MODEL",),}, + "optional": {"model_diff": ("MODEL",), + "text_encoder_diff": ("CLIP",)}, } RETURN_TYPES = () FUNCTION = "save" @@ -55,30 +73,17 @@ class LoraSave: CATEGORY = "_for_testing" - def save(self, filename_prefix, rank, model_diff=None): - if model_diff is None: + def save(self, filename_prefix, rank, model_diff=None, text_encoder_diff=None): + if model_diff is None and text_encoder_diff is None: return {} full_output_folder, filename, counter, subfolder, filename_prefix = 
folder_paths.get_save_image_path(filename_prefix, self.output_dir) output_sd = {} - prefix_key = "diffusion_model." - stored = set() - - comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True) - sd = model_diff.model_state_dict(filter_prefix=prefix_key) - - for k in sd: - if k.endswith(".weight"): - weight_diff = sd[k] - if weight_diff.ndim < 2: - continue - try: - out = extract_lora(weight_diff, rank) - output_sd["{}.lora_up.weight".format(k[:-7])] = out[0].contiguous().half().cpu() - output_sd["{}.lora_down.weight".format(k[:-7])] = out[1].contiguous().half().cpu() - except: - logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) + if model_diff is not None: + output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd) + if text_encoder_diff is not None: + output_sd = calc_lora_model(text_encoder_diff.patcher, rank, "", "text_encoders.", output_sd) output_checkpoint = f"{filename}_{counter:05}_.safetensors" output_checkpoint = os.path.join(full_output_folder, output_checkpoint) From a09b29ca1124da1bf362f9db485264078d3a5578 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 7 Sep 2024 02:56:24 -0400 Subject: [PATCH 3/4] Add an option to the SaveLora node to store the bias diff. --- comfy_extras/nodes_lora_extract.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py index 76330053f..1523082ba 100644 --- a/comfy_extras/nodes_lora_extract.py +++ b/comfy_extras/nodes_lora_extract.py @@ -38,7 +38,7 @@ def extract_lora(diff, rank): Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1]) return (U, Vh) -def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd): +def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, bias_diff=False): comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True) sd = model_diff.model_state_dict(filter_prefix=prefix_model) @@ -53,6 +53,8 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd): output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu() except: logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) + elif bias_diff and k.endswith(".bias"): + output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu() return output_sd class LoraSave: @@ -62,7 +64,9 @@ class LoraSave: @classmethod def INPUT_TYPES(s): return {"required": {"filename_prefix": ("STRING", {"default": "loras/ComfyUI_extracted_lora"}), - "rank": ("INT", {"default": 8, "min": 1, "max": 1024, "step": 1}), + "rank": ("INT", {"default": 8, "min": 1, "max": 4096, "step": 1}), + "lora_type": (["standard"],), + "bias_diff": ("BOOLEAN", {"default": True}), }, "optional": {"model_diff": ("MODEL",), "text_encoder_diff": ("CLIP",)}, @@ -73,7 +77,7 @@ class LoraSave: CATEGORY = "_for_testing" - def save(self, filename_prefix, rank, model_diff=None, text_encoder_diff=None): + def save(self, filename_prefix, rank, lora_type, bias_diff, model_diff=None, text_encoder_diff=None): if model_diff is None and text_encoder_diff is None: return {} @@ -81,9 +85,9 @@ class LoraSave: output_sd = {} if model_diff is not None: - output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd) + output_sd = calc_lora_model(model_diff, rank, 
"diffusion_model.", "diffusion_model.", output_sd, bias_diff=bias_diff) if text_encoder_diff is not None: - output_sd = calc_lora_model(text_encoder_diff.patcher, rank, "", "text_encoders.", output_sd) + output_sd = calc_lora_model(text_encoder_diff.patcher, rank, "", "text_encoders.", output_sd, bias_diff=bias_diff) output_checkpoint = f"{filename}_{counter:05}_.safetensors" output_checkpoint = os.path.join(full_output_folder, output_checkpoint) From 8aabd7c8c053aa92aeca8661537175a4b6a6676d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 7 Sep 2024 03:21:02 -0400 Subject: [PATCH 4/4] SaveLora node can now save "full diff" lora format. This isn't actually a lora format and is saving the full diff of the weights in a format that can be used in the lora loader nodes. --- comfy_extras/nodes_lora_extract.py | 39 +++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py index 1523082ba..3c2f179d3 100644 --- a/comfy_extras/nodes_lora_extract.py +++ b/comfy_extras/nodes_lora_extract.py @@ -4,6 +4,7 @@ import comfy.utils import folder_paths import os import logging +from enum import Enum CLAMP_QUANTILE = 0.99 @@ -38,21 +39,34 @@ def extract_lora(diff, rank): Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1]) return (U, Vh) -def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, bias_diff=False): +class LORAType(Enum): + STANDARD = 0 + FULL_DIFF = 1 + +LORA_TYPES = {"standard": LORAType.STANDARD, + "full_diff": LORAType.FULL_DIFF} + +def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, bias_diff=False): comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True) sd = model_diff.model_state_dict(filter_prefix=prefix_model) for k in sd: if k.endswith(".weight"): weight_diff = sd[k] - if weight_diff.ndim < 2: - continue - try: - out = extract_lora(weight_diff, rank) - output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu() - output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu() - except: - logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) + if lora_type == LORAType.STANDARD: + if weight_diff.ndim < 2: + if bias_diff: + output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu() + continue + try: + out = extract_lora(weight_diff, rank) + output_sd["{}{}.lora_up.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[0].contiguous().half().cpu() + output_sd["{}{}.lora_down.weight".format(prefix_lora, k[len(prefix_model):-7])] = out[1].contiguous().half().cpu() + except: + logging.warning("Could not generate lora weights for key {}, is the weight difference a zero?".format(k)) + elif lora_type == LORAType.FULL_DIFF: + output_sd["{}{}.diff".format(prefix_lora, k[len(prefix_model):-7])] = weight_diff.contiguous().half().cpu() + elif bias_diff and k.endswith(".bias"): output_sd["{}{}.diff_b".format(prefix_lora, k[len(prefix_model):-5])] = sd[k].contiguous().half().cpu() return output_sd @@ -65,7 +79,7 @@ class LoraSave: def INPUT_TYPES(s): return {"required": {"filename_prefix": ("STRING", {"default": "loras/ComfyUI_extracted_lora"}), "rank": ("INT", {"default": 8, "min": 1, "max": 4096, "step": 1}), - "lora_type": (["standard"],), + "lora_type": (tuple(LORA_TYPES.keys()),), "bias_diff": 
("BOOLEAN", {"default": True}), }, "optional": {"model_diff": ("MODEL",), @@ -81,13 +95,14 @@ class LoraSave: if model_diff is None and text_encoder_diff is None: return {} + lora_type = LORA_TYPES.get(lora_type) full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) output_sd = {} if model_diff is not None: - output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, bias_diff=bias_diff) + output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, bias_diff=bias_diff) if text_encoder_diff is not None: - output_sd = calc_lora_model(text_encoder_diff.patcher, rank, "", "text_encoders.", output_sd, bias_diff=bias_diff) + output_sd = calc_lora_model(text_encoder_diff.patcher, rank, "", "text_encoders.", output_sd, lora_type, bias_diff=bias_diff) output_checkpoint = f"{filename}_{counter:05}_.safetensors" output_checkpoint = os.path.join(full_output_folder, output_checkpoint)