diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 3a0a26d07..65b879907 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -92,6 +92,8 @@ class LatentPreviewMethod(enum.Enum):
 
 parser.add_argument("--preview-method", type=LatentPreviewMethod, default=LatentPreviewMethod.NoPreviews, help="Default preview method for sampler nodes.", action=EnumAction)
 
+parser.add_argument("--preview-size", type=int, default=512, help="Sets the maximum preview size for sampler nodes.")
+
 cache_group = parser.add_mutually_exclusive_group()
 cache_group.add_argument("--cache-classic", action="store_true", help="Use the old style (aggressive) caching.")
 cache_group.add_argument("--cache-lru", type=int, default=0, help="Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM.")
diff --git a/comfy/lora.py b/comfy/lora.py
index d770d9b40..eb95d02ab 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -528,20 +528,40 @@ def calculate_weight(patches, weight, key, intermediate_dtype=torch.float32):
             except Exception as e:
                 logging.error("ERROR {} {} {}".format(patch_type, key, e))
         elif patch_type == "glora":
-            if v[4] is not None:
-                alpha = v[4] / v[0].shape[0]
-            else:
-                alpha = 1.0
-
             dora_scale = v[5]
 
+            old_glora = False
+            if v[3].shape[1] == v[2].shape[0] == v[0].shape[0] == v[1].shape[1]:
+                rank = v[0].shape[0]
+                old_glora = True
+
+            if v[3].shape[0] == v[2].shape[1] == v[0].shape[1] == v[1].shape[0]:
+                if old_glora and v[1].shape[0] == weight.shape[0] and weight.shape[0] == weight.shape[1]:
+                    pass
+                else:
+                    old_glora = False
+                    rank = v[1].shape[0]
+
             a1 = comfy.model_management.cast_to_device(v[0].flatten(start_dim=1), weight.device, intermediate_dtype)
             a2 = comfy.model_management.cast_to_device(v[1].flatten(start_dim=1), weight.device, intermediate_dtype)
             b1 = comfy.model_management.cast_to_device(v[2].flatten(start_dim=1), weight.device, intermediate_dtype)
             b2 = comfy.model_management.cast_to_device(v[3].flatten(start_dim=1), weight.device, intermediate_dtype)
 
+            if v[4] is not None:
+                alpha = v[4] / rank
+            else:
+                alpha = 1.0
+
             try:
-                lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=intermediate_dtype), a2), a1)).reshape(weight.shape)
+                if old_glora:
+                    lora_diff = (torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1).to(dtype=intermediate_dtype), a2), a1)).reshape(weight.shape) #old lycoris glora
+                else:
+                    if weight.dim() > 2:
+                        lora_diff = torch.einsum("o i ..., i j -> o j ...", torch.einsum("o i ..., i j -> o j ...", weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape)
+                    else:
+                        lora_diff = torch.mm(torch.mm(weight.to(dtype=intermediate_dtype), a1), a2).reshape(weight.shape)
+                    lora_diff += torch.mm(b1, b2).reshape(weight.shape)
+
                 if dora_scale is not None:
                     weight = function(weight_decompose(dora_scale, weight, lora_diff, alpha, strength, intermediate_dtype))
                 else:
diff --git a/latent_preview.py b/latent_preview.py
index ae6c106e4..e14c72ce4 100644
--- a/latent_preview.py
+++ b/latent_preview.py
@@ -9,7 +9,7 @@ import folder_paths
 import comfy.utils
 import logging
 
-MAX_PREVIEW_RESOLUTION = 512
+MAX_PREVIEW_RESOLUTION = args.preview_size
 
 def preview_to_image(latent_image):
     latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1)  # change scale from -1..1 to 0..1
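
For reference, here is a minimal standalone sketch of the shape heuristic the comfy/lora.py hunk introduces: it distinguishes the old LyCORIS GLoRA tensor layout from the new one and applies the matching weight-diff formula for 2-D weights. The `glora_diff` helper and toy tensors are hypothetical and only mirror the patch's logic; alpha scaling, DoRA handling, device/dtype casts, and the >2-D einsum path are omitted. `a1`, `a2`, `b1`, `b2` correspond to `v[0]`..`v[3]` in `calculate_weight`.

```python
import torch

def glora_diff(weight, a1, a2, b1, b2):
    # Old LyCORIS layout: a1/b1 rows and a2/b2 columns all equal the rank.
    old_glora = b2.shape[1] == b1.shape[0] == a1.shape[0] == a2.shape[1]

    # New layout: the transposed relationship holds instead. A square weight
    # can satisfy both checks; in that ambiguous case the old interpretation
    # is kept only when a2's first dim also matches the weight's.
    if b2.shape[0] == b1.shape[1] == a1.shape[1] == a2.shape[0]:
        if not (old_glora and a2.shape[0] == weight.shape[0] and weight.shape[0] == weight.shape[1]):
            old_glora = False

    if old_glora:
        # Old GLoRA: diff = b2 @ b1 + (W @ a2) @ a1
        return torch.mm(b2, b1) + torch.mm(torch.mm(weight, a2), a1)
    # New GLoRA: diff = (W @ a1) @ a2 + b1 @ b2
    return torch.mm(torch.mm(weight, a1), a2) + torch.mm(b1, b2)

# Toy example: rank-4 factors in the new layout around an 8x6 weight.
out_dim, in_dim, rank = 8, 6, 4
W = torch.randn(out_dim, in_dim)
a1, a2 = torch.randn(in_dim, rank), torch.randn(rank, in_dim)
b1, b2 = torch.randn(out_dim, rank), torch.randn(rank, in_dim)
print(glora_diff(W, a1, a2, b1, b2).shape)  # torch.Size([8, 6])
```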