From 76d53c4622fc06372975ed2a43ad345935b8a551 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 18 Aug 2023 11:13:29 -0400 Subject: [PATCH 01/26] Add support for clip g vision model to CLIPVisionLoader. --- comfy/clip_vision.py | 13 ++++++++++--- comfy/clip_vision_config_g.json | 18 ++++++++++++++++++ 2 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 comfy/clip_vision_config_g.json diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 2c8603bbe..a887e51b5 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -50,18 +50,22 @@ def convert_to_transformers(sd, prefix): if "{}proj".format(prefix) in sd_k: sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1) - sd = transformers_convert(sd, prefix, "vision_model.", 32) + sd = transformers_convert(sd, prefix, "vision_model.", 48) return sd def load_clipvision_from_sd(sd, prefix="", convert_keys=False): if convert_keys: sd = convert_to_transformers(sd, prefix) - if "vision_model.encoder.layers.30.layer_norm1.weight" in sd: + if "vision_model.encoder.layers.47.layer_norm1.weight" in sd: + json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_g.json") + elif "vision_model.encoder.layers.30.layer_norm1.weight" in sd: json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_h.json") else: json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json") clip = ClipVisionModel(json_config) m, u = clip.load_sd(sd) + if len(m) > 0: + print("missing clip vision:", m) u = set(u) keys = list(sd.keys()) for k in keys: @@ -72,4 +76,7 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False): def load(ckpt_path): sd = load_torch_file(ckpt_path) - return load_clipvision_from_sd(sd) + if "visual.transformer.resblocks.0.attn.in_proj_weight" in sd: + return load_clipvision_from_sd(sd, prefix="visual.", convert_keys=True) + else: + return load_clipvision_from_sd(sd) diff --git a/comfy/clip_vision_config_g.json b/comfy/clip_vision_config_g.json new file mode 100644 index 000000000..708e7e21a --- /dev/null +++ b/comfy/clip_vision_config_g.json @@ -0,0 +1,18 @@ +{ + "attention_dropout": 0.0, + "dropout": 0.0, + "hidden_act": "gelu", + "hidden_size": 1664, + "image_size": 224, + "initializer_factor": 1.0, + "initializer_range": 0.02, + "intermediate_size": 8192, + "layer_norm_eps": 1e-05, + "model_type": "clip_vision_model", + "num_attention_heads": 16, + "num_channels": 3, + "num_hidden_layers": 48, + "patch_size": 14, + "projection_dim": 1280, + "torch_dtype": "float32" +} From 39ac856a3360af46f1f5fa3e2d2549b5d90b577b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 18 Aug 2023 02:39:23 -0400 Subject: [PATCH 02/26] ReVision support: unclip nodes can now be used with SDXL. 
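In short: if an unclip node attached a CLIP vision embedding to the conditioning, its noise-augmented projection replaces the text encoder's pooled output that SDXL normally feeds into its ADM conditioning. A simplified restatement of the sdxl_pooled helper added below, with comments (the unclip_adm helper and the noise augmentor already exist in the unclip code path):

    def sdxl_pooled(args, noise_augmentor):
        if "unclip_conditioning" in args:
            # Noise augment the CLIP image embedding and keep only the first
            # 1280 channels, the width SDXL expects for pooled conditioning.
            return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor)[:, :1280]
        else:
            return args["pooled_output"]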
--- comfy/model_base.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index ad661ec7d..979e2c65e 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -148,13 +148,20 @@ class SDInpaint(BaseModel): super().__init__(model_config, model_type, device=device) self.concat_keys = ("mask", "masked_image") +def sdxl_pooled(args, noise_augmentor): + if "unclip_conditioning" in args: + return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor)[:,:1280] + else: + return args["pooled_output"] + class SDXLRefiner(BaseModel): def __init__(self, model_config, model_type=ModelType.EPS, device=None): super().__init__(model_config, model_type, device=device) self.embedder = Timestep(256) + self.noise_augmentor = CLIPEmbeddingNoiseAugmentation(**{"noise_schedule_config": {"timesteps": 1000, "beta_schedule": "squaredcos_cap_v2"}, "timestep_dim": 1280}) def encode_adm(self, **kwargs): - clip_pooled = kwargs["pooled_output"] + clip_pooled = sdxl_pooled(kwargs, self.noise_augmentor) width = kwargs.get("width", 768) height = kwargs.get("height", 768) crop_w = kwargs.get("crop_w", 0) @@ -178,9 +185,10 @@ class SDXL(BaseModel): def __init__(self, model_config, model_type=ModelType.EPS, device=None): super().__init__(model_config, model_type, device=device) self.embedder = Timestep(256) + self.noise_augmentor = CLIPEmbeddingNoiseAugmentation(**{"noise_schedule_config": {"timesteps": 1000, "beta_schedule": "squaredcos_cap_v2"}, "timestep_dim": 1280}) def encode_adm(self, **kwargs): - clip_pooled = kwargs["pooled_output"] + clip_pooled = sdxl_pooled(kwargs, self.noise_augmentor) width = kwargs.get("width", 768) height = kwargs.get("height", 768) crop_w = kwargs.get("crop_w", 0) From d6e4b342e6171e0c89fed1ec9998b608888af401 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 18 Aug 2023 02:46:11 -0400 Subject: [PATCH 03/26] Support for Control Loras. Control loras are controlnets where some of the weights are stored in "lora" format: an up and a down low-rank matrix that, when multiplied together and added to the unet weight, give the controlnet weight. This allows a much smaller memory footprint depending on the rank of the matrices. These controlnets are used just like regular ones.
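In rough pseudocode (an illustrative sketch, not part of the patch; the name control_lora_weight is hypothetical, the real logic lives in the ControlLoraOps.Linear and ControlLoraOps.Conv2d forward methods below):

    import torch

    def control_lora_weight(w_unet, up, down):
        # up and down are the low rank factors shipped in the control lora
        # file; their flattened product has the same number of elements as
        # the unet weight and acts as a delta that turns the frozen unet
        # weight into the controlnet weight.
        delta = torch.mm(up.flatten(start_dim=1), down.flatten(start_dim=1))
        return w_unet + delta.reshape(w_unet.shape).type(w_unet.dtype)

For a rank r factorization of an (out x in) weight, the file stores r * (out + in) values instead of out * in, which is where the memory savings come from.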
--- comfy/cldm/cldm.py | 54 +++++---- comfy/ldm/modules/attention.py | 83 +++++++------ .../modules/diffusionmodules/openaimodel.py | 54 +++++---- comfy/ops.py | 5 + comfy/samplers.py | 2 +- comfy/sd.py | 110 ++++++++++++++++++ 6 files changed, 216 insertions(+), 92 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 46fbf0a69..5201b3c26 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -6,8 +6,6 @@ import torch as th import torch.nn as nn from ..ldm.modules.diffusionmodules.util import ( - conv_nd, - linear, zero_module, timestep_embedding, ) @@ -15,7 +13,7 @@ from ..ldm.modules.diffusionmodules.util import ( from ..ldm.modules.attention import SpatialTransformer from ..ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample from ..ldm.util import exists - +import comfy.ops class ControlledUnetModel(UNetModel): #implemented in the ldm unet @@ -55,6 +53,8 @@ class ControlNet(nn.Module): use_linear_in_transformer=False, adm_in_channels=None, transformer_depth_middle=None, + device=None, + operations=comfy.ops, ): super().__init__() assert use_spatial_transformer == True, "use_spatial_transformer has to be true" @@ -117,9 +117,9 @@ class ControlNet(nn.Module): time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), + operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device), nn.SiLU(), - linear(time_embed_dim, time_embed_dim), + operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device), ) if self.num_classes is not None: @@ -132,9 +132,9 @@ class ControlNet(nn.Module): assert adm_in_channels is not None self.label_emb = nn.Sequential( nn.Sequential( - linear(adm_in_channels, time_embed_dim), + operations.Linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device), nn.SiLU(), - linear(time_embed_dim, time_embed_dim), + operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device), ) ) else: @@ -143,28 +143,28 @@ class ControlNet(nn.Module): self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) + operations.conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype, device=device) ) ] ) - self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)]) + self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels, operations=operations)]) self.input_hint_block = TimestepEmbedSequential( - conv_nd(dims, hint_channels, 16, 3, padding=1), + operations.conv_nd(dims, hint_channels, 16, 3, padding=1), nn.SiLU(), - conv_nd(dims, 16, 16, 3, padding=1), + operations.conv_nd(dims, 16, 16, 3, padding=1), nn.SiLU(), - conv_nd(dims, 16, 32, 3, padding=1, stride=2), + operations.conv_nd(dims, 16, 32, 3, padding=1, stride=2), nn.SiLU(), - conv_nd(dims, 32, 32, 3, padding=1), + operations.conv_nd(dims, 32, 32, 3, padding=1), nn.SiLU(), - conv_nd(dims, 32, 96, 3, padding=1, stride=2), + operations.conv_nd(dims, 32, 96, 3, padding=1, stride=2), nn.SiLU(), - conv_nd(dims, 96, 96, 3, padding=1), + operations.conv_nd(dims, 96, 96, 3, padding=1), nn.SiLU(), - conv_nd(dims, 96, 256, 3, padding=1, stride=2), + operations.conv_nd(dims, 96, 256, 3, padding=1, stride=2), nn.SiLU(), - zero_module(conv_nd(dims, 256, model_channels, 3, padding=1)) + zero_module(operations.conv_nd(dims, 256, model_channels, 3, padding=1)) ) self._feature_size = model_channels @@ -182,6 +182,7 @@ class ControlNet(nn.Module): 
dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, + operations=operations ) ] ch = mult * model_channels @@ -204,11 +205,11 @@ class ControlNet(nn.Module): SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint + use_checkpoint=use_checkpoint, operations=operations ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) - self.zero_convs.append(self.make_zero_conv(ch)) + self.zero_convs.append(self.make_zero_conv(ch, operations=operations)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: @@ -224,16 +225,17 @@ class ControlNet(nn.Module): use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, + operations=operations ) if resblock_updown else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch + ch, conv_resample, dims=dims, out_channels=out_ch, operations=operations ) ) ) ch = out_ch input_block_chans.append(ch) - self.zero_convs.append(self.make_zero_conv(ch)) + self.zero_convs.append(self.make_zero_conv(ch, operations=operations)) ds *= 2 self._feature_size += ch @@ -253,11 +255,12 @@ class ControlNet(nn.Module): dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, + operations=operations ), SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint + use_checkpoint=use_checkpoint, operations=operations ), ResBlock( ch, @@ -266,13 +269,14 @@ class ControlNet(nn.Module): dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, + operations=operations ), ) - self.middle_block_out = self.make_zero_conv(ch) + self.middle_block_out = self.make_zero_conv(ch, operations=operations) self._feature_size += ch - def make_zero_conv(self, channels): - return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0))) + def make_zero_conv(self, channels, operations=None): + return TimestepEmbedSequential(zero_module(operations.conv_nd(self.dims, channels, channels, 1, padding=0))) def forward(self, x, hint, timesteps, context, y=None, **kwargs): t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 573cea6ac..87a4aa807 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -10,7 +10,6 @@ from .diffusionmodules.util import checkpoint from .sub_quadratic_attention import efficient_dot_product_attention from comfy import model_management -import comfy.ops if model_management.xformers_enabled(): import xformers @@ -52,9 +51,9 @@ def init_(tensor): # feedforward class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out, dtype=None, device=None): + def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=None): super().__init__() - self.proj = comfy.ops.Linear(dim_in, dim_out * 2, dtype=dtype, device=device) + self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device) def forward(self, x): x, gate = self.proj(x).chunk(2, dim=-1) @@ -62,19 +61,19 @@ class GEGLU(nn.Module): class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None): + 
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=None): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) project_in = nn.Sequential( - comfy.ops.Linear(dim, inner_dim, dtype=dtype, device=device), + operations.Linear(dim, inner_dim, dtype=dtype, device=device), nn.GELU() - ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device) + ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations) self.net = nn.Sequential( project_in, nn.Dropout(dropout), - comfy.ops.Linear(inner_dim, dim_out, dtype=dtype, device=device) + operations.Linear(inner_dim, dim_out, dtype=dtype, device=device) ) def forward(self, x): @@ -148,7 +147,7 @@ class SpatialSelfAttention(nn.Module): class CrossAttentionBirchSan(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=None): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -156,12 +155,12 @@ class CrossAttentionBirchSan(nn.Module): self.scale = dim_head ** -0.5 self.heads = heads - self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) self.to_out = nn.Sequential( - comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), + operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout) ) @@ -245,7 +244,7 @@ class CrossAttentionBirchSan(nn.Module): class CrossAttentionDoggettx(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=None): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -253,12 +252,12 @@ class CrossAttentionDoggettx(nn.Module): self.scale = dim_head ** -0.5 self.heads = heads - self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) self.to_out = nn.Sequential( - comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), + operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout) ) @@ -343,7 +342,7 @@ class CrossAttentionDoggettx(nn.Module): return self.to_out(r2) class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., 
dtype=None, device=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=None): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -351,12 +350,12 @@ class CrossAttention(nn.Module): self.scale = dim_head ** -0.5 self.heads = heads - self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) self.to_out = nn.Sequential( - comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), + operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout) ) @@ -399,7 +398,7 @@ class CrossAttention(nn.Module): class MemoryEfficientCrossAttention(nn.Module): # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223 - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None, operations=None): super().__init__() print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using " f"{heads} heads.") @@ -409,11 +408,11 @@ class MemoryEfficientCrossAttention(nn.Module): self.heads = heads self.dim_head = dim_head - self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) + self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) self.attention_op: Optional[Any] = None def forward(self, x, context=None, value=None, mask=None): @@ -450,7 +449,7 @@ class MemoryEfficientCrossAttention(nn.Module): return self.to_out(out) class CrossAttentionPytorch(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=None): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -458,11 +457,11 @@ class CrossAttentionPytorch(nn.Module): self.heads = heads self.dim_head = dim_head - self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = comfy.ops.Linear(context_dim, 
inner_dim, bias=False, dtype=dtype, device=device) + self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) + self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) self.attention_op: Optional[Any] = None def forward(self, x, context=None, value=None, mask=None): @@ -508,14 +507,14 @@ else: class BasicTransformerBlock(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, - disable_self_attn=False, dtype=None, device=None): + disable_self_attn=False, dtype=None, device=None, operations=None): super().__init__() self.disable_self_attn = disable_self_attn self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, - context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device) # is a self-attention if not self.disable_self_attn - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device) + context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations) # is a self-attention if not self.disable_self_attn + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device) # is self-attn if context is none + heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device) self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device) self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device) @@ -648,7 +647,7 @@ class SpatialTransformer(nn.Module): def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None, disable_self_attn=False, use_linear=False, - use_checkpoint=True, dtype=None, device=None): + use_checkpoint=True, dtype=None, device=None, operations=None): super().__init__() if exists(context_dim) and not isinstance(context_dim, list): context_dim = [context_dim] * depth @@ -656,26 +655,26 @@ class SpatialTransformer(nn.Module): inner_dim = n_heads * d_head self.norm = Normalize(in_channels, dtype=dtype, device=device) if not use_linear: - self.proj_in = nn.Conv2d(in_channels, + self.proj_in = operations.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) else: - self.proj_in = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype, device=device) + self.proj_in = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device) self.transformer_blocks = nn.ModuleList( [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d], - disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device) + disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device, operations=operations) for d in range(depth)] ) if not use_linear: - self.proj_out = nn.Conv2d(inner_dim,in_channels, + self.proj_out = 
operations.Conv2d(inner_dim,in_channels, kernel_size=1, stride=1, padding=0, dtype=dtype, device=device) else: - self.proj_out = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype, device=device) + self.proj_out = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device) self.use_linear = use_linear def forward(self, x, context=None, transformer_options={}): diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 90c153465..8063adb85 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -8,8 +8,6 @@ import torch.nn.functional as F from .util import ( checkpoint, - conv_nd, - linear, avg_pool_nd, zero_module, normalization, @@ -17,7 +15,7 @@ from .util import ( ) from ..attention import SpatialTransformer from comfy.ldm.util import exists - +import comfy.ops class TimestepBlock(nn.Module): """ @@ -72,14 +70,14 @@ class Upsample(nn.Module): upsampling occurs in the inner-two dimensions. """ - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=None): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding, dtype=dtype, device=device) + self.conv = operations.conv_nd(dims, self.channels, self.out_channels, 3, padding=padding, dtype=dtype, device=device) def forward(self, x, output_shape=None): assert x.shape[1] == self.channels @@ -108,7 +106,7 @@ class Downsample(nn.Module): downsampling occurs in the inner-two dimensions. 
""" - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=None): super().__init__() self.channels = channels self.out_channels = out_channels or channels @@ -116,7 +114,7 @@ class Downsample(nn.Module): self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: - self.op = conv_nd( + self.op = operations.conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding, dtype=dtype, device=device ) else: @@ -158,6 +156,7 @@ class ResBlock(TimestepBlock): down=False, dtype=None, device=None, + operations=None ): super().__init__() self.channels = channels @@ -171,7 +170,7 @@ class ResBlock(TimestepBlock): self.in_layers = nn.Sequential( nn.GroupNorm(32, channels, dtype=dtype, device=device), nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device), + operations.conv_nd(dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device), ) self.updown = up or down @@ -187,7 +186,7 @@ class ResBlock(TimestepBlock): self.emb_layers = nn.Sequential( nn.SiLU(), - linear( + operations.Linear( emb_channels, 2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device ), @@ -197,18 +196,18 @@ class ResBlock(TimestepBlock): nn.SiLU(), nn.Dropout(p=dropout), zero_module( - conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1, dtype=dtype, device=device) + operations.conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1, dtype=dtype, device=device) ), ) if self.out_channels == channels: self.skip_connection = nn.Identity() elif use_conv: - self.skip_connection = conv_nd( + self.skip_connection = operations.conv_nd( dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device ) else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1, dtype=dtype, device=device) + self.skip_connection = operations.conv_nd(dims, channels, self.out_channels, 1, dtype=dtype, device=device) def forward(self, x, emb): """ @@ -317,6 +316,7 @@ class UNetModel(nn.Module): adm_in_channels=None, transformer_depth_middle=None, device=None, + operations=comfy.ops, ): super().__init__() assert use_spatial_transformer == True, "use_spatial_transformer has to be true" @@ -379,9 +379,9 @@ class UNetModel(nn.Module): time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim, dtype=self.dtype, device=device), + operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device), nn.SiLU(), - linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device), + operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device), ) if self.num_classes is not None: @@ -394,9 +394,9 @@ class UNetModel(nn.Module): assert adm_in_channels is not None self.label_emb = nn.Sequential( nn.Sequential( - linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device), + operations.Linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device), nn.SiLU(), - linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device), + operations.Linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device), ) ) else: @@ -405,7 +405,7 @@ class UNetModel(nn.Module): self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype, 
device=device) + operations.conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype, device=device) ) ] ) @@ -426,6 +426,7 @@ class UNetModel(nn.Module): use_scale_shift_norm=use_scale_shift_norm, dtype=self.dtype, device=device, + operations=operations, ) ] ch = mult * model_channels @@ -447,7 +448,7 @@ class UNetModel(nn.Module): layers.append(SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) @@ -468,10 +469,11 @@ class UNetModel(nn.Module): down=True, dtype=self.dtype, device=device, + operations=operations ) if resblock_updown else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device + ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device, operations=operations ) ) ) @@ -498,11 +500,12 @@ class UNetModel(nn.Module): use_scale_shift_norm=use_scale_shift_norm, dtype=self.dtype, device=device, + operations=operations ), SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ), ResBlock( ch, @@ -513,6 +516,7 @@ class UNetModel(nn.Module): use_scale_shift_norm=use_scale_shift_norm, dtype=self.dtype, device=device, + operations=operations ), ) self._feature_size += ch @@ -532,6 +536,7 @@ class UNetModel(nn.Module): use_scale_shift_norm=use_scale_shift_norm, dtype=self.dtype, device=device, + operations=operations ) ] ch = model_channels * mult @@ -554,7 +559,7 @@ class UNetModel(nn.Module): SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) ) if level and i == self.num_res_blocks[level]: @@ -571,9 +576,10 @@ class UNetModel(nn.Module): up=True, dtype=self.dtype, device=device, + operations=operations ) if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device) + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device, operations=operations) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) @@ -582,12 +588,12 @@ class UNetModel(nn.Module): self.out = nn.Sequential( nn.GroupNorm(32, ch, dtype=self.dtype, device=device), nn.SiLU(), - zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1, dtype=self.dtype, device=device)), + zero_module(operations.conv_nd(dims, model_channels, out_channels, 3, padding=1, dtype=self.dtype, device=device)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( nn.GroupNorm(32, ch, dtype=self.dtype, device=device), - conv_nd(dims, model_channels, n_embed, 1, dtype=self.dtype, device=device), + operations.conv_nd(dims, model_channels, n_embed, 1, dtype=self.dtype, device=device), #nn.LogSoftmax(dim=1) # change to cross_entropy 
and produce non-normalized logits ) diff --git a/comfy/ops.py b/comfy/ops.py index 2e72030bd..678c2c6d0 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -21,6 +21,11 @@ class Conv2d(torch.nn.Conv2d): def reset_parameters(self): return None +def conv_nd(dims, *args, **kwargs): + if dims == 2: + return Conv2d(*args, **kwargs) + else: + raise ValueError(f"unsupported dimensions: {dims}") @contextmanager def use_comfy_ops(): # Kind of an ugly hack but I can't think of a better way diff --git a/comfy/samplers.py b/comfy/samplers.py index ee37913e6..134336de6 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -478,7 +478,7 @@ def pre_run_control(model, conds): timestep_end = None percent_to_timestep_function = lambda a: model.sigma_to_t(model.t_to_sigma(torch.tensor(a) * 999.0)) if 'control' in x[1]: - x[1]['control'].pre_run(model.inner_model, percent_to_timestep_function) + x[1]['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function) def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): cond_cnets = [] diff --git a/comfy/sd.py b/comfy/sd.py index 461c234db..48b1a8ce7 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -844,9 +844,119 @@ class ControlNet(ControlBase): out.append(self.control_model_wrapped) return out +class ControlLoraOps: + class Linear(torch.nn.Module): + def __init__(self, in_features: int, out_features: int, bias: bool = True, + device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.weight = None + self.up = None + self.down = None + self.bias = None + + def forward(self, input): + if self.up is not None: + return torch.nn.functional.linear(input, self.weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(self.weight.dtype), self.bias) + else: + return torch.nn.functional.linear(input, self.weight, self.bias) + + class Conv2d(torch.nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + padding_mode='zeros', + device=None, + dtype=None + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.stride = stride + self.padding = padding + self.dilation = dilation + self.transposed = False + self.output_padding = 0 + self.groups = groups + self.padding_mode = padding_mode + + self.weight = None + self.bias = None + self.up = None + self.down = None + + + def forward(self, input): + if self.up is not None: + return torch.nn.functional.conv2d(input, self.weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(self.weight.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) + else: + return torch.nn.functional.conv2d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + + def conv_nd(self, dims, *args, **kwargs): + if dims == 2: + return self.Conv2d(*args, **kwargs) + else: + raise ValueError(f"unsupported dimensions: {dims}") + + +class ControlLora(ControlNet): + def __init__(self, control_weights, global_average_pooling=False, device=None): + ControlBase.__init__(self, device) + self.control_weights = control_weights + self.global_average_pooling = global_average_pooling + + def pre_run(self, model, percent_to_timestep_function): + super().pre_run(model, 
percent_to_timestep_function) + controlnet_config = model.model_config.unet_config.copy() + controlnet_config.pop("out_channels") + controlnet_config["hint_channels"] = self.control_weights["input_hint_block.0.weight"].shape[1] + controlnet_config["operations"] = ControlLoraOps() + self.control_model = cldm.ControlNet(**controlnet_config) + if model_management.should_use_fp16(): + self.control_model.half() + self.control_model.to(model_management.get_torch_device()) + diffusion_model = model.diffusion_model + sd = diffusion_model.state_dict() + cm = self.control_model.state_dict() + + for k in sd: + try: + set_attr(self.control_model, k, sd[k]) + except: + pass + + for k in self.control_weights: + if k not in {"lora_controlnet"}: + set_attr(self.control_model, k, self.control_weights[k].to(model_management.get_torch_device())) + + def copy(self): + c = ControlLora(self.control_weights, global_average_pooling=self.global_average_pooling) + self.copy_to(c) + return c + + def cleanup(self): + del self.control_model + self.control_model = None + super().cleanup() + + def get_models(self): + out = ControlBase.get_models(self) + return out def load_controlnet(ckpt_path, model=None): controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True) + if "lora_controlnet" in controlnet_data: + return ControlLora(controlnet_data) controlnet_config = None if "controlnet_cond_embedding.conv_in.weight" in controlnet_data: #diffusers format From b80c3276dce510973c24d1c9b7fb48be36292396 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 18 Aug 2023 16:32:23 -0400 Subject: [PATCH 04/26] Fix issue with gligen. --- comfy/ldm/modules/attention.py | 20 ++++++++++--------- .../modules/diffusionmodules/openaimodel.py | 6 +++--- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 87a4aa807..973619bf2 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -16,6 +16,8 @@ if model_management.xformers_enabled(): import xformers.ops from comfy.cli_args import args +import comfy.ops + # CrossAttn precision handling if args.dont_upcast_attention: print("disabling upcasting of attention") @@ -51,7 +53,7 @@ def init_(tensor): # feedforward class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=None): + def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=comfy.ops): super().__init__() self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device) @@ -61,7 +63,7 @@ class GEGLU(nn.Module): class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=None): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) @@ -147,7 +149,7 @@ class SpatialSelfAttention(nn.Module): class CrossAttentionBirchSan(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -244,7 +246,7 @@ class CrossAttentionBirchSan(nn.Module): class CrossAttentionDoggettx(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, 
dim_head=64, dropout=0., dtype=None, device=None, operations=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -342,7 +344,7 @@ class CrossAttentionDoggettx(nn.Module): return self.to_out(r2) class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -398,7 +400,7 @@ class CrossAttention(nn.Module): class MemoryEfficientCrossAttention(nn.Module): # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223 - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None, operations=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None, operations=comfy.ops): super().__init__() print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using " f"{heads} heads.") @@ -449,7 +451,7 @@ class MemoryEfficientCrossAttention(nn.Module): return self.to_out(out) class CrossAttentionPytorch(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=None): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -507,7 +509,7 @@ else: class BasicTransformerBlock(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, - disable_self_attn=False, dtype=None, device=None, operations=None): + disable_self_attn=False, dtype=None, device=None, operations=comfy.ops): super().__init__() self.disable_self_attn = disable_self_attn self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, @@ -647,7 +649,7 @@ class SpatialTransformer(nn.Module): def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None, disable_self_attn=False, use_linear=False, - use_checkpoint=True, dtype=None, device=None, operations=None): + use_checkpoint=True, dtype=None, device=None, operations=comfy.ops): super().__init__() if exists(context_dim) and not isinstance(context_dim, list): context_dim = [context_dim] * depth diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 8063adb85..11cec0eda 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -70,7 +70,7 @@ class Upsample(nn.Module): upsampling occurs in the inner-two dimensions. 
""" - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=None): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=comfy.ops): super().__init__() self.channels = channels self.out_channels = out_channels or channels @@ -106,7 +106,7 @@ class Downsample(nn.Module): downsampling occurs in the inner-two dimensions. """ - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=None): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=comfy.ops): super().__init__() self.channels = channels self.out_channels = out_channels or channels @@ -156,7 +156,7 @@ class ResBlock(TimestepBlock): down=False, dtype=None, device=None, - operations=None + operations=comfy.ops ): super().__init__() self.channels = channels From 81ccacaa7c0f4df860480cfb3c467467e3c50ec5 Mon Sep 17 00:00:00 2001 From: ncpt <57245077+NoCrypt@users.noreply.github.com> Date: Sat, 19 Aug 2023 17:36:13 +0700 Subject: [PATCH 05/26] Make the extensions loads in parallel instead of waiting one by one --- web/scripts/app.js | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 1c95c765c..6a2c63290 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1026,18 +1026,21 @@ export class ComfyApp { } /** - * Loads all extensions from the API into the window + * Loads all extensions from the API into the window in parallel */ async #loadExtensions() { - const extensions = await api.getExtensions(); - this.logging.addEntry("Comfy.App", "debug", { Extensions: extensions }); - for (const ext of extensions) { - try { - await import(api.apiURL(ext)); - } catch (error) { - console.error("Error loading extension", ext, error); - } - } + const extensions = await api.getExtensions(); + this.logging.addEntry("Comfy.App", "debug", { Extensions: extensions }); + + const extensionPromises = extensions.map(async ext => { + try { + await import(api.apiURL(ext)); + } catch (error) { + console.error("Error loading extension", ext, error); + } + }); + + await Promise.all(extensionPromises); } /** From c9b562aed153cb35d4ce4126caf86995b0c63b12 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 19 Aug 2023 12:13:13 -0400 Subject: [PATCH 06/26] Free more memory before VAE encode/decode. --- comfy/sd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 48b1a8ce7..dc5daffac 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -649,7 +649,7 @@ class VAE: def decode(self, samples_in): self.first_stage_model = self.first_stage_model.to(self.device) try: - memory_used = (2562 * samples_in.shape[2] * samples_in.shape[3] * 64) * 1.4 + memory_used = (2562 * samples_in.shape[2] * samples_in.shape[3] * 64) * 1.7 model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) @@ -677,7 +677,7 @@ class VAE: self.first_stage_model = self.first_stage_model.to(self.device) pixel_samples = pixel_samples.movedim(-1,1) try: - memory_used = (2078 * pixel_samples.shape[2] * pixel_samples.shape[3]) * 1.4 #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change. 
+ memory_used = (2078 * pixel_samples.shape[2] * pixel_samples.shape[3]) * 1.7 #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change. model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) From e9469e732d8c84048dcf4c94604607c92e5297df Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 20 Aug 2023 04:00:53 -0400 Subject: [PATCH 07/26] --disable-smart-memory now disables loading model directly to vram. --- comfy/model_management.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 5c5d5ab74..80f6620a0 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -397,6 +397,9 @@ def unet_inital_load_device(parameters, dtype): return torch_dev cpu_dev = torch.device("cpu") + if DISABLE_SMART_MEMORY: + return cpu_dev + dtype_size = 4 if dtype == torch.float16 or dtype == torch.bfloat16: dtype_size = 2 From 2c096e4260a25b739e144d7aadd8d1a38f183993 Mon Sep 17 00:00:00 2001 From: Simon Lui <502929+simonlui@users.noreply.github.com> Date: Thu, 17 Aug 2023 03:12:17 -0700 Subject: [PATCH 08/26] Add ipex optimize and other enhancements for Intel GPUs based on recent memory changes. --- comfy/cli_args.py | 2 ++ comfy/model_management.py | 24 +++++++++++++++++++----- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 374dd2f7d..b4f22f319 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -58,6 +58,8 @@ fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.") +parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize when loading models with Intel GPUs.") + class LatentPreviewMethod(enum.Enum): NoPreviews = "none" Auto = "auto" diff --git a/comfy/model_management.py b/comfy/model_management.py index 80f6620a0..83e2f6dfa 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -88,8 +88,10 @@ def get_total_memory(dev=None, torch_total_too=False): mem_total = 1024 * 1024 * 1024 #TODO mem_total_torch = mem_total elif xpu_available: + stats = torch.xpu.memory_stats(dev) + mem_reserved = stats['reserved_bytes.all.current'] mem_total = torch.xpu.get_device_properties(dev).total_memory - mem_total_torch = mem_total + mem_total_torch = mem_reserved else: stats = torch.cuda.memory_stats(dev) mem_reserved = stats['reserved_bytes.all.current'] @@ -208,6 +210,7 @@ if DISABLE_SMART_MEMORY: print("Disabling smart memory management") def get_torch_device_name(device): + global xpu_available if hasattr(device, 'type'): if device.type == "cuda": try: @@ -217,6 +220,8 @@ def get_torch_device_name(device): return "{} {} : {}".format(device, torch.cuda.get_device_name(device), allocator_backend) else: return "{}".format(device.type) + elif xpu_available: + return "{} {}".format(device, torch.xpu.get_device_name(device)) else: return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device)) @@ -244,6 +249,7 @@ class LoadedModel: return self.model_memory() def model_load(self, lowvram_model_memory=0): + global xpu_available patch_model_to = None if lowvram_model_memory == 0: patch_model_to = self.device @@ -264,6 +270,10 @@ class LoadedModel: accelerate.dispatch_model(self.real_model, device_map=device_map, 
main_device=self.device) self.model_accelerated = True + if xpu_available and not args.disable_ipex_optimize: + self.real_model.training = False + self.real_model = torch.xpu.optimize(self.real_model, inplace=True) + return self.real_model def model_unload(self): @@ -500,8 +510,12 @@ def get_free_memory(dev=None, torch_free_too=False): mem_free_total = 1024 * 1024 * 1024 #TODO mem_free_torch = mem_free_total elif xpu_available: - mem_free_total = torch.xpu.get_device_properties(dev).total_memory - torch.xpu.memory_allocated(dev) - mem_free_torch = mem_free_total + stats = torch.xpu.memory_stats(dev) + mem_active = stats['active_bytes.all.current'] + mem_allocated = stats['allocated_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_torch = mem_reserved - mem_active + mem_free_total = torch.xpu.get_device_properties(dev).total_memory - mem_allocated + mem_free_torch else: stats = torch.cuda.memory_stats(dev) mem_active = stats['active_bytes.all.current'] @@ -573,10 +587,10 @@ def should_use_fp16(device=None, model_params=0): if directml_enabled: return False - if cpu_mode() or mps_mode() or xpu_available: + if cpu_mode() or mps_mode(): return False #TODO ? - if torch.cuda.is_bf16_supported(): + if torch.cuda.is_bf16_supported() or xpu_available: return True props = torch.cuda.get_device_properties("cuda") From 92254659753b7ee6dfed9d68bbeb6fe9e743749e Mon Sep 17 00:00:00 2001 From: Simon Lui <502929+simonlui@users.noreply.github.com> Date: Sat, 19 Aug 2023 21:35:22 -0700 Subject: [PATCH 09/26] Further tuning and fix mem_free_total. --- comfy/model_management.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 83e2f6dfa..51f631416 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -271,8 +271,7 @@ class LoadedModel: self.model_accelerated = True if xpu_available and not args.disable_ipex_optimize: - self.real_model.training = False - self.real_model = torch.xpu.optimize(self.real_model, inplace=True) + self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True) return self.real_model @@ -515,7 +514,7 @@ def get_free_memory(dev=None, torch_free_too=False): mem_allocated = stats['allocated_bytes.all.current'] mem_reserved = stats['reserved_bytes.all.current'] mem_free_torch = mem_reserved - mem_active - mem_free_total = torch.xpu.get_device_properties(dev).total_memory - mem_allocated + mem_free_torch + mem_free_total = torch.xpu.get_device_properties(dev).total_memory - mem_allocated else: stats = torch.cuda.memory_stats(dev) mem_active = stats['active_bytes.all.current'] From 9b1d5a587cee63771c1184d0942093bf64b54d96 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 20 Aug 2023 19:55:48 +0100 Subject: [PATCH 10/26] Allow loading js extensions without copying to /web folder --- nodes.py | 11 +++++++++++ server.py | 23 +++++++++++++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index e46aed825..27a329c68 100644 --- a/nodes.py +++ b/nodes.py @@ -1673,6 +1673,8 @@ NODE_DISPLAY_NAME_MAPPINGS = { "VAEEncodeTiled": "VAE Encode (Tiled)", } +EXTENSION_WEB_DIRS = {} + def load_custom_node(module_path, ignore=set()): module_name = os.path.basename(module_path) if os.path.isfile(module_path): @@ -1681,11 +1683,20 @@ def load_custom_node(module_path, ignore=set()): try: if os.path.isfile(module_path): 
module_spec = importlib.util.spec_from_file_location(module_name, module_path) + module_dir = os.path.split(module_path)[0] else: module_spec = importlib.util.spec_from_file_location(module_name, os.path.join(module_path, "__init__.py")) + module_dir = module_path + module = importlib.util.module_from_spec(module_spec) sys.modules[module_name] = module module_spec.loader.exec_module(module) + + if hasattr(module, "WEB_DIRECTORY") and getattr(module, "WEB_DIRECTORY") is not None: + web_dir = os.path.abspath(os.path.join(module_dir, getattr(module, "WEB_DIRECTORY"))) + if os.path.isdir(web_dir): + EXTENSION_WEB_DIRS[module_name] = web_dir + if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None: for name in module.NODE_CLASS_MAPPINGS: if name not in ignore: diff --git a/server.py b/server.py index fab33be3e..344847b35 100644 --- a/server.py +++ b/server.py @@ -5,6 +5,7 @@ import nodes import folder_paths import execution import uuid +import urllib import json import glob import struct @@ -67,6 +68,8 @@ class PromptServer(): mimetypes.init() mimetypes.types_map['.js'] = 'application/javascript; charset=utf-8' + + self.supports = ["custom_nodes_from_web"] self.prompt_queue = None self.loop = loop self.messages = asyncio.Queue() @@ -123,8 +126,18 @@ class PromptServer(): @routes.get("/extensions") async def get_extensions(request): - files = glob.glob(os.path.join(self.web_root, 'extensions/**/*.js'), recursive=True) - return web.json_response(list(map(lambda f: "/" + os.path.relpath(f, self.web_root).replace("\\", "/"), files))) + files = glob.glob(os.path.join( + self.web_root, 'extensions/**/*.js'), recursive=True) + + extensions = list(map(lambda f: "/" + os.path.relpath(f, self.web_root).replace("\\", "/"), files)) + + for name, dir in nodes.EXTENSION_WEB_DIRS.items(): + files = glob.glob(os.path.join(dir, '**/*.js'), recursive=True) + extensions.extend(list(map(lambda f: "/extensions/" + urllib.parse.quote( + name) + "/" + os.path.relpath(f, dir).replace("\\", "/"), files))) + print(extensions) + + return web.json_response(extensions) def get_dir_by_type(dir_type): if dir_type is None: @@ -492,6 +505,12 @@ class PromptServer(): def add_routes(self): self.app.add_routes(self.routes) + + for name, dir in nodes.EXTENSION_WEB_DIRS.items(): + self.app.add_routes([ + web.static('/extensions/' + urllib.parse.quote(name), dir, follow_symlinks=True), + ]) + self.app.add_routes([ web.static('/', self.web_root, follow_symlinks=True), ]) From 0d7b0a4dc7e847b08a9c4b9262f7f64a6d2040b2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 20 Aug 2023 14:56:47 -0400 Subject: [PATCH 11/26] Small cleanups. --- comfy/model_management.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 51f631416..fc0cb9011 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -589,7 +589,10 @@ def should_use_fp16(device=None, model_params=0): if cpu_mode() or mps_mode(): return False #TODO ? 
- if torch.cuda.is_bf16_supported() or xpu_available: + if xpu_available: + return True + + if torch.cuda.is_bf16_supported(): return True props = torch.cuda.get_device_properties("cuda") From cdaf65ceb134918d6be174a8231d112b5661e316 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 20 Aug 2023 20:01:25 +0100 Subject: [PATCH 12/26] remove log --- server.py | 1 - 1 file changed, 1 deletion(-) diff --git a/server.py b/server.py index 344847b35..d1295342b 100644 --- a/server.py +++ b/server.py @@ -135,7 +135,6 @@ class PromptServer(): files = glob.glob(os.path.join(dir, '**/*.js'), recursive=True) extensions.extend(list(map(lambda f: "/extensions/" + urllib.parse.quote( name) + "/" + os.path.relpath(f, dir).replace("\\", "/"), files))) - print(extensions) return web.json_response(extensions) From d08e53de2e063b3797ea5752a67da37a55c9e0ed Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 20 Aug 2023 21:47:32 -0400 Subject: [PATCH 13/26] Remove autocast from controlnet code. --- comfy/cldm/cldm.py | 5 +---- comfy/sd.py | 20 ++++++++------------ 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 5201b3c26..251483131 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -279,7 +279,7 @@ class ControlNet(nn.Module): return TimestepEmbedSequential(zero_module(operations.conv_nd(self.dims, channels, channels, 1, padding=0))) def forward(self, x, hint, timesteps, context, y=None, **kwargs): - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(self.dtype) emb = self.time_embed(t_emb) guided_hint = self.input_hint_block(hint, emb, context) @@ -287,9 +287,6 @@ class ControlNet(nn.Module): outs = [] hs = [] - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - if self.num_classes is not None: assert y.shape[0] == x.shape[0] emb = emb + self.label_emb(y) diff --git a/comfy/sd.py b/comfy/sd.py index dc5daffac..85806e705 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -798,17 +798,14 @@ class ControlNet(ControlBase): if x_noisy.shape[0] != self.cond_hint.shape[0]: self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number) - if self.control_model.dtype == torch.float16: - precision_scope = torch.autocast - else: - precision_scope = contextlib.nullcontext - with precision_scope(model_management.get_autocast_device(self.device)): - context = torch.cat(cond['c_crossattn'], 1) - y = cond.get('c_adm', None) - control = self.control_model(x=x_noisy, hint=self.cond_hint, timesteps=t, context=context, y=y) + context = torch.cat(cond['c_crossattn'], 1) + y = cond.get('c_adm', None) + if y is not None: + y = y.to(self.control_model.dtype) + control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=t, context=context.to(self.control_model.dtype), y=y) + out = {'middle':[], 'output': []} - autocast_enabled = torch.is_autocast_enabled() for i in range(len(control)): if i == (len(control) - 1): @@ -822,7 +819,7 @@ class ControlNet(ControlBase): x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3]) x *= self.strength - if x.dtype != output_dtype and not autocast_enabled: + if x.dtype != output_dtype: x = x.to(output_dtype) if control_prev is not None and key in control_prev: @@ -1098,11 +1095,10 @@ class T2IAdapter(ControlBase): 
output_dtype = x_noisy.dtype out = {'input':[]} - autocast_enabled = torch.is_autocast_enabled() for i in range(len(self.control_input)): key = 'input' x = self.control_input[i] * self.strength - if x.dtype != output_dtype and not autocast_enabled: + if x.dtype != output_dtype: x = x.to(output_dtype) if control_prev is not None and key in control_prev: From 199d73364a58caa64668d5bcfc144a2c10899424 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 21 Aug 2023 00:54:04 -0400 Subject: [PATCH 14/26] Fix ControlLora on lowvram. --- comfy/sd.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 85806e705..b0482c782 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -243,6 +243,13 @@ def set_attr(obj, attr, value): setattr(obj, attrs[-1], torch.nn.Parameter(value)) del prev +def get_attr(obj, attr): + attrs = attr.split(".") + for name in attrs: + obj = getattr(obj, name) + return obj + + class ModelPatcher: def __init__(self, model, load_device, offload_device, size=0, current_device=None): self.size = size @@ -856,9 +863,9 @@ class ControlLoraOps: def forward(self, input): if self.up is not None: - return torch.nn.functional.linear(input, self.weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(self.weight.dtype), self.bias) + return torch.nn.functional.linear(input, self.weight.to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias) else: - return torch.nn.functional.linear(input, self.weight, self.bias) + return torch.nn.functional.linear(input, self.weight.to(input.device), self.bias) class Conv2d(torch.nn.Module): def __init__( @@ -895,9 +902,9 @@ class ControlLoraOps: def forward(self, input): if self.up is not None: - return torch.nn.functional.conv2d(input, self.weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(self.weight.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) + return torch.nn.functional.conv2d(input, self.weight.to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) else: - return torch.nn.functional.conv2d(input, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) + return torch.nn.functional.conv2d(input, self.weight.to(input.device), self.bias, self.stride, self.padding, self.dilation, self.groups) def conv_nd(self, dims, *args, **kwargs): if dims == 2: @@ -927,8 +934,14 @@ class ControlLora(ControlNet): cm = self.control_model.state_dict() for k in sd: + weight = sd[k] + if weight.device == torch.device("meta"): #lowvram NOTE: this depends on the inner working of the accelerate library so it might break. + key_split = k.split('.') # I have no idea why they don't just leave the weight there instead of using the meta device. + op = get_attr(diffusion_model, '.'.join(key_split[:-1])) + weight = op._hf_hook.weights_map[key_split[-1]] + try: - set_attr(self.control_model, k, sd[k]) + set_attr(self.control_model, k, weight) except: pass From 763b0cf024c8fd462343ab0a8cfdab099714168b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 21 Aug 2023 20:38:31 -0400 Subject: [PATCH 15/26] Fix control lora not working in fp32. 
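The control model used to be forced to fp16 whenever should_use_fp16() returned true, so it could end up in a different dtype than a base model running in fp32. Roughly, the flow after this patch (a simplified sketch using the names from comfy/sd.py, not the literal diff below):

    # The ControlLora now inherits the dtype of the loaded base model
    # instead of assuming fp16.
    dtype = model.get_dtype()                      # fp16 or fp32, whatever the UNet uses
    control_model.to(dtype)
    control_model.to(model_management.get_torch_device())
    for k in control_weights:
        if k not in {"lora_controlnet"}:           # marker key, not a real weight
            set_attr(control_model, k,
                     control_weights[k].to(dtype).to(model_management.get_torch_device()))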
--- comfy/sd.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index b0482c782..3493b1a75 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -926,8 +926,8 @@ class ControlLora(ControlNet): controlnet_config["hint_channels"] = self.control_weights["input_hint_block.0.weight"].shape[1] controlnet_config["operations"] = ControlLoraOps() self.control_model = cldm.ControlNet(**controlnet_config) - if model_management.should_use_fp16(): - self.control_model.half() + dtype = model.get_dtype() + self.control_model.to(dtype) self.control_model.to(model_management.get_torch_device()) diffusion_model = model.diffusion_model sd = diffusion_model.state_dict() @@ -947,7 +947,7 @@ class ControlLora(ControlNet): for k in self.control_weights: if k not in {"lora_controlnet"}: - set_attr(self.control_model, k, self.control_weights[k].to(model_management.get_torch_device())) + set_attr(self.control_model, k, self.control_weights[k].to(dtype).to(model_management.get_torch_device())) def copy(self): c = ControlLora(self.control_weights, global_average_pooling=self.global_average_pooling) From cf5ae469283283973466f33ebee9b873b44e44d2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 21 Aug 2023 23:20:49 -0400 Subject: [PATCH 16/26] Controlnet/t2iadapter cleanup. --- .../modules/diffusionmodules/openaimodel.py | 4 +- comfy/sd.py | 105 +++++++++--------- comfy/t2i_adapter/adapter.py | 4 + 3 files changed, 58 insertions(+), 55 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 11cec0eda..3ce3c2e7b 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -632,7 +632,9 @@ class UNetModel(nn.Module): transformer_options["block"] = ("middle", 0) h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) if control is not None and 'middle' in control and len(control['middle']) > 0: - h += control['middle'].pop() + ctrl = control['middle'].pop() + if ctrl is not None: + h += ctrl for id, module in enumerate(self.output_blocks): transformer_options["block"] = ("output", id) diff --git a/comfy/sd.py b/comfy/sd.py index 3493b1a75..09eab5053 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -742,6 +742,7 @@ class ControlBase: device = model_management.get_torch_device() self.device = device self.previous_controlnet = None + self.global_average_pooling = False def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)): self.cond_hint_original = cond_hint @@ -777,6 +778,51 @@ class ControlBase: c.strength = self.strength c.timestep_percent_range = self.timestep_percent_range + def control_merge(self, control_input, control_output, control_prev, output_dtype): + out = {'input':[], 'middle':[], 'output': []} + + if control_input is not None: + for i in range(len(control_input)): + key = 'input' + x = control_input[i] + if x is not None: + x *= self.strength + if x.dtype != output_dtype: + x = x.to(output_dtype) + out[key].insert(0, x) + + if control_output is not None: + for i in range(len(control_output)): + if i == (len(control_output) - 1): + key = 'middle' + index = 0 + else: + key = 'output' + index = i + x = control_output[i] + if x is not None: + if self.global_average_pooling: + x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3]) + + x *= self.strength + if x.dtype != output_dtype: + x = x.to(output_dtype) + + out[key].append(x) + if control_prev is 
not None: + for x in ['input', 'middle', 'output']: + o = out[x] + for i in range(len(control_prev[x])): + prev_val = control_prev[x][i] + if i >= len(o): + o.append(prev_val) + elif prev_val is not None: + if o[i] is None: + o[i] = prev_val + else: + o[i] += prev_val + return out + class ControlNet(ControlBase): def __init__(self, control_model, global_average_pooling=False, device=None): super().__init__(device) @@ -811,32 +857,7 @@ class ControlNet(ControlBase): if y is not None: y = y.to(self.control_model.dtype) control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=t, context=context.to(self.control_model.dtype), y=y) - - out = {'middle':[], 'output': []} - - for i in range(len(control)): - if i == (len(control) - 1): - key = 'middle' - index = 0 - else: - key = 'output' - index = i - x = control[i] - if self.global_average_pooling: - x = torch.mean(x, dim=(2, 3), keepdim=True).repeat(1, 1, x.shape[2], x.shape[3]) - - x *= self.strength - if x.dtype != output_dtype: - x = x.to(output_dtype) - - if control_prev is not None and key in control_prev: - prev = control_prev[key][index] - if prev is not None: - x += prev - out[key].append(x) - if control_prev is not None and 'input' in control_prev: - out['input'] = control_prev['input'] - return out + return self.control_merge(None, control, control_prev, output_dtype) def copy(self): c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling) @@ -1101,37 +1122,13 @@ class T2IAdapter(ControlBase): if x_noisy.shape[0] != self.cond_hint.shape[0]: self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number) if self.control_input is None: + self.t2i_model.to(x_noisy.dtype) self.t2i_model.to(self.device) - self.control_input = self.t2i_model(self.cond_hint) + self.control_input = self.t2i_model(self.cond_hint.to(x_noisy.dtype)) self.t2i_model.cpu() - output_dtype = x_noisy.dtype - out = {'input':[]} - - for i in range(len(self.control_input)): - key = 'input' - x = self.control_input[i] * self.strength - if x.dtype != output_dtype: - x = x.to(output_dtype) - - if control_prev is not None and key in control_prev: - index = len(control_prev[key]) - i * 3 - 3 - prev = control_prev[key][index] - if prev is not None: - x += prev - out[key].insert(0, None) - out[key].insert(0, None) - out[key].insert(0, x) - - if control_prev is not None and 'input' in control_prev: - for i in range(len(out['input'])): - if out['input'][i] is None: - out['input'][i] = control_prev['input'][i] - if control_prev is not None and 'middle' in control_prev: - out['middle'] = control_prev['middle'] - if control_prev is not None and 'output' in control_prev: - out['output'] = control_prev['output'] - return out + control_input = list(map(lambda a: None if a is None else a.clone(), self.control_input)) + return self.control_merge(control_input, None, control_prev, x_noisy.dtype) def copy(self): c = T2IAdapter(self.t2i_model, self.channels_in) diff --git a/comfy/t2i_adapter/adapter.py b/comfy/t2i_adapter/adapter.py index 87e3d859e..3647c4cf7 100644 --- a/comfy/t2i_adapter/adapter.py +++ b/comfy/t2i_adapter/adapter.py @@ -128,6 +128,8 @@ class Adapter(nn.Module): for j in range(self.nums_rb): idx = i * self.nums_rb + j x = self.body[idx](x) + features.append(None) + features.append(None) features.append(x) return features @@ -259,6 +261,8 @@ class Adapter_light(nn.Module): features = [] for i in range(len(self.channels)): x = self.body[i](x) + features.append(None) + 
features.append(None) features.append(x) return features From e2256b40879d36e804f226db98574b64532da3eb Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 22 Aug 2023 01:44:31 -0400 Subject: [PATCH 17/26] Add clip_vision_g download command to colab notebook for ReVision. --- notebooks/comfyui_colab.ipynb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb index b1c487101..e4264a798 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -75,6 +75,8 @@ "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P ./models/checkpoints/\n", "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -P ./models/checkpoints/\n", "\n", + "# SDXL ReVision\n", + "#!wget -c https://huggingface.co/comfyanonymous/clip_vision_g/resolve/main/clip_vision_g.safetensors -P ./models/clip_vision/\n", "\n", "# SD1.5\n", "!wget -c https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -P ./models/checkpoints/\n", From f2a7cc912186c89fda9580f36da28c7fc382ea26 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 22 Aug 2023 01:55:09 -0400 Subject: [PATCH 18/26] Add control lora links to colab notebook. --- notebooks/comfyui_colab.ipynb | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb index e4264a798..4fdccaace 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -144,6 +144,11 @@ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors -P ./models/controlnet/\n", "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors -P ./models/controlnet/\n", "\n", + "# ControlNet SDXL\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-canny-rank256.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-depth-rank256.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors -P ./models/controlnet/\n", + "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors -P ./models/controlnet/\n", "\n", "# Controlnet Preprocessor nodes by Fannovel16\n", "#!cd custom_nodes && git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors; cd comfy_controlnet_preprocessors && python install.py\n", From 85fde89d7f5259aaec9e42cc8116c3dab70a69a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 22 Aug 2023 14:38:34 -0400 Subject: [PATCH 19/26] T2I adapter SDXL. 
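SDXL T2I adapters pixel-unshuffle the hint image by 16 instead of 8 and downsample at different resblocks, so the Adapter gains an xl flag and its forward pass pads the feature list with None entries to line up with the SDXL UNet input blocks; the loader also splits the last feature off as the middle-block control. A rough sketch of the detection added to load_t2i_adapter (cin, channel, ksize and use_conv are read from the checkpoint, as in the diff below):

    # SDXL adapters unshuffle by 16, so a single-channel hint gives
    # cin = 1 * 16 * 16 = 256 (vs. 64 for SD1.x); that is what is detected here.
    xl = (cin == 256)
    model_ad = adapter.Adapter(cin=cin,
                               channels=[channel, channel*2, channel*4, channel*4][:4],
                               nums_rb=2, ksize=ksize, sk=True,
                               use_conv=use_conv, xl=xl)
    # The hint channel count is recovered from the unshuffle factor:
    # input_channels = cin // (unshuffle * unshuffle), e.g. 256 // 256 = 1.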
--- comfy/sd.py | 21 ++++++++++++++++---- comfy/t2i_adapter/adapter.py | 37 ++++++++++++++++++++++++++++++------ 2 files changed, 48 insertions(+), 10 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 09eab5053..d75bbd9aa 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -1128,7 +1128,11 @@ class T2IAdapter(ControlBase): self.t2i_model.cpu() control_input = list(map(lambda a: None if a is None else a.clone(), self.control_input)) - return self.control_merge(control_input, None, control_prev, x_noisy.dtype) + mid = None + if self.t2i_model.xl == True: + mid = control_input[-1:] + control_input = control_input[:-1] + return self.control_merge(control_input, mid, control_prev, x_noisy.dtype) def copy(self): c = T2IAdapter(self.t2i_model, self.channels_in) @@ -1151,11 +1155,20 @@ def load_t2i_adapter(t2i_data): down_opts = list(filter(lambda a: a.endswith("down_opt.op.weight"), keys)) if len(down_opts) > 0: use_conv = True - model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv) + xl = False + if cin == 256: + xl = True + model_ad = adapter.Adapter(cin=cin, channels=[channel, channel*2, channel*4, channel*4][:4], nums_rb=2, ksize=ksize, sk=True, use_conv=use_conv, xl=xl) else: return None - model_ad.load_state_dict(t2i_data) - return T2IAdapter(model_ad, cin // 64) + missing, unexpected = model_ad.load_state_dict(t2i_data) + if len(missing) > 0: + print("t2i missing", missing) + + if len(unexpected) > 0: + print("t2i unexpected", unexpected) + + return T2IAdapter(model_ad, model_ad.input_channels) class StyleModel: diff --git a/comfy/t2i_adapter/adapter.py b/comfy/t2i_adapter/adapter.py index 3647c4cf7..000cf041c 100644 --- a/comfy/t2i_adapter/adapter.py +++ b/comfy/t2i_adapter/adapter.py @@ -101,17 +101,30 @@ class ResnetBlock(nn.Module): class Adapter(nn.Module): - def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64, ksize=3, sk=False, use_conv=True): + def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64, ksize=3, sk=False, use_conv=True, xl=True): super(Adapter, self).__init__() - self.unshuffle = nn.PixelUnshuffle(8) + unshuffle = 8 + resblock_no_downsample = [] + resblock_downsample = [3, 2, 1] + self.xl = xl + if self.xl: + unshuffle = 16 + resblock_no_downsample = [1] + resblock_downsample = [2] + + self.input_channels = cin // (unshuffle * unshuffle) + self.unshuffle = nn.PixelUnshuffle(unshuffle) self.channels = channels self.nums_rb = nums_rb self.body = [] for i in range(len(channels)): for j in range(nums_rb): - if (i != 0) and (j == 0): + if (i in resblock_downsample) and (j == 0): self.body.append( ResnetBlock(channels[i - 1], channels[i], down=True, ksize=ksize, sk=sk, use_conv=use_conv)) + elif (i in resblock_no_downsample) and (j == 0): + self.body.append( + ResnetBlock(channels[i - 1], channels[i], down=False, ksize=ksize, sk=sk, use_conv=use_conv)) else: self.body.append( ResnetBlock(channels[i], channels[i], down=False, ksize=ksize, sk=sk, use_conv=use_conv)) @@ -128,8 +141,16 @@ class Adapter(nn.Module): for j in range(self.nums_rb): idx = i * self.nums_rb + j x = self.body[idx](x) - features.append(None) - features.append(None) + if self.xl: + features.append(None) + if i == 0: + features.append(None) + features.append(None) + if i == 2: + features.append(None) + else: + features.append(None) + features.append(None) features.append(x) return features @@ -243,10 +264,14 @@ class extractor(nn.Module): class Adapter_light(nn.Module): def 
__init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64): super(Adapter_light, self).__init__() - self.unshuffle = nn.PixelUnshuffle(8) + unshuffle = 8 + self.unshuffle = nn.PixelUnshuffle(unshuffle) + self.input_channels = cin // (unshuffle * unshuffle) self.channels = channels self.nums_rb = nums_rb self.body = [] + self.xl = False + for i in range(len(channels)): if i == 0: self.body.append(extractor(in_c=cin, inter_c=channels[i]//4, out_c=channels[i], nums_rb=nums_rb, down=False)) From afcb9cb1df9d67506ec0d08adf30f7be04e95090 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 22 Aug 2023 16:23:54 -0400 Subject: [PATCH 20/26] All resolutions now work with t2i adapter for SDXL. --- comfy/sd.py | 10 +++++++++- comfy/t2i_adapter/adapter.py | 14 +++++++------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index d75bbd9aa..5920ddde7 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -2,6 +2,7 @@ import torch import contextlib import copy import inspect +import math from comfy import model_management from .ldm.util import instantiate_from_config @@ -1099,6 +1100,12 @@ class T2IAdapter(ControlBase): self.channels_in = channels_in self.control_input = None + def scale_image_to(self, width, height): + unshuffle_amount = self.t2i_model.unshuffle_amount + width = math.ceil(width / unshuffle_amount) * unshuffle_amount + height = math.ceil(height / unshuffle_amount) * unshuffle_amount + return width, height + def get_control(self, x_noisy, t, cond, batched_number): control_prev = None if self.previous_controlnet is not None: @@ -1116,7 +1123,8 @@ class T2IAdapter(ControlBase): del self.cond_hint self.control_input = None self.cond_hint = None - self.cond_hint = utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").float().to(self.device) + width, height = self.scale_image_to(x_noisy.shape[3] * 8, x_noisy.shape[2] * 8) + self.cond_hint = utils.common_upscale(self.cond_hint_original, width, height, 'nearest-exact', "center").float().to(self.device) if self.channels_in == 1 and self.cond_hint.shape[1] > 1: self.cond_hint = torch.mean(self.cond_hint, 1, keepdim=True) if x_noisy.shape[0] != self.cond_hint.shape[0]: diff --git a/comfy/t2i_adapter/adapter.py b/comfy/t2i_adapter/adapter.py index 000cf041c..e9a606b1c 100644 --- a/comfy/t2i_adapter/adapter.py +++ b/comfy/t2i_adapter/adapter.py @@ -103,17 +103,17 @@ class ResnetBlock(nn.Module): class Adapter(nn.Module): def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64, ksize=3, sk=False, use_conv=True, xl=True): super(Adapter, self).__init__() - unshuffle = 8 + self.unshuffle_amount = 8 resblock_no_downsample = [] resblock_downsample = [3, 2, 1] self.xl = xl if self.xl: - unshuffle = 16 + self.unshuffle_amount = 16 resblock_no_downsample = [1] resblock_downsample = [2] - self.input_channels = cin // (unshuffle * unshuffle) - self.unshuffle = nn.PixelUnshuffle(unshuffle) + self.input_channels = cin // (self.unshuffle_amount * self.unshuffle_amount) + self.unshuffle = nn.PixelUnshuffle(self.unshuffle_amount) self.channels = channels self.nums_rb = nums_rb self.body = [] @@ -264,9 +264,9 @@ class extractor(nn.Module): class Adapter_light(nn.Module): def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64): super(Adapter_light, self).__init__() - unshuffle = 8 - self.unshuffle = nn.PixelUnshuffle(unshuffle) - self.input_channels = cin // (unshuffle * unshuffle) + self.unshuffle_amount = 8 + self.unshuffle = 
nn.PixelUnshuffle(self.unshuffle_amount) + self.input_channels = cin // (self.unshuffle_amount * self.unshuffle_amount) self.channels = channels self.nums_rb = nums_rb self.body = [] From d7b3b0f8c11c6261d0d8b859ea98f2d818b7e67d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 22 Aug 2023 19:41:49 -0400 Subject: [PATCH 21/26] Don't hardcode node names for image upload widget. --- nodes.py | 4 ++-- web/extensions/core/uploadImage.js | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/nodes.py b/nodes.py index 27a329c68..b2f224ea3 100644 --- a/nodes.py +++ b/nodes.py @@ -1306,7 +1306,7 @@ class LoadImage: input_dir = folder_paths.get_input_directory() files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] return {"required": - {"image": (sorted(files), )}, + {"image": (sorted(files), {"image_upload": True})}, } CATEGORY = "image" @@ -1349,7 +1349,7 @@ class LoadImageMask: input_dir = folder_paths.get_input_directory() files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] return {"required": - {"image": (sorted(files), ), + {"image": (sorted(files), {"image_upload": True}), "channel": (s._color_channels, ), } } diff --git a/web/extensions/core/uploadImage.js b/web/extensions/core/uploadImage.js index f50473ae3..fda83f8cf 100644 --- a/web/extensions/core/uploadImage.js +++ b/web/extensions/core/uploadImage.js @@ -5,7 +5,8 @@ import { app } from "../../scripts/app.js"; app.registerExtension({ name: "Comfy.UploadImage", async beforeRegisterNodeDef(nodeType, nodeData, app) { - if (nodeData.name === "LoadImage" || nodeData.name === "LoadImageMask") { + console.log(nodeData); + if (nodeData?.input?.required?.image?.[1]?.image_upload === true) { nodeData.input.required.upload = ["IMAGEUPLOAD"]; } }, From f081017c1a20a5d9cfae9005fd0898502e3356be Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 23 Aug 2023 01:07:57 -0400 Subject: [PATCH 22/26] Save memory by storing text encoder weights in fp16 in most situations. Do inference in fp32 to make sure quality stays the exact same. --- comfy/model_management.py | 2 +- comfy/sd.py | 7 ++----- comfy/sd1_clip.py | 4 ++-- web/extensions/core/uploadImage.js | 1 - 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index fc0cb9011..9c100144e 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -433,7 +433,7 @@ def text_encoder_device(): return get_torch_device() elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM: #NOTE: on a Ryzen 5 7600X with 4080 it's faster to shift to GPU - if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough. + if should_use_fp16() or torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough. return get_torch_device() else: return torch.device("cpu") diff --git a/comfy/sd.py b/comfy/sd.py index 5920ddde7..7de72d37c 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -546,11 +546,8 @@ class CLIP: offload_device = model_management.text_encoder_offload_device() params['device'] = load_device self.cond_stage_model = clip(**(params)) - #TODO: make sure this doesn't have a quality loss before enabling. 
- # if model_management.should_use_fp16(load_device): - # self.cond_stage_model.half() - - self.cond_stage_model = self.cond_stage_model.to() + if model_management.should_use_fp16(load_device): + self.cond_stage_model.half() self.tokenizer = tokenizer(embedding_directory=embedding_directory) self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index feca41880..c699214af 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -137,9 +137,9 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): if backup_embeds.weight.dtype != torch.float32: precision_scope = torch.autocast else: - precision_scope = contextlib.nullcontext + precision_scope = lambda a, b: contextlib.nullcontext(a) - with precision_scope(model_management.get_autocast_device(device)): + with precision_scope(model_management.get_autocast_device(device), torch.float32): outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden") self.transformer.set_input_embeddings(backup_embeds) diff --git a/web/extensions/core/uploadImage.js b/web/extensions/core/uploadImage.js index fda83f8cf..530c4599e 100644 --- a/web/extensions/core/uploadImage.js +++ b/web/extensions/core/uploadImage.js @@ -5,7 +5,6 @@ import { app } from "../../scripts/app.js"; app.registerExtension({ name: "Comfy.UploadImage", async beforeRegisterNodeDef(nodeType, nodeData, app) { - console.log(nodeData); if (nodeData?.input?.required?.image?.[1]?.image_upload === true) { nodeData.input.required.upload = ["IMAGEUPLOAD"]; } From 00c0b2c5073084c765579767c24f496117662f3a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 23 Aug 2023 21:01:15 -0400 Subject: [PATCH 23/26] Initialize text encoder to target dtype. 
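Building the transformer in fp32 and halving it afterwards briefly holds both copies in memory; constructing it directly in the target dtype avoids that. The mechanism is a context manager that temporarily swaps out torch.nn.Linear so every layer created inside it lands on the requested device and dtype. A minimal standalone sketch of the pattern (force_linear_defaults is a hypothetical name for illustration; the real use_comfy_ops in comfy/ops.py substitutes comfy's own Linear class rather than plain torch.nn.Linear):

    import contextlib
    import torch

    @contextlib.contextmanager
    def force_linear_defaults(device=None, dtype=None):
        old_linear = torch.nn.Linear
        def patched(in_features, out_features, bias=True, **kwargs):
            # Override whatever device/dtype the caller asked for.
            if device is not None:
                kwargs["device"] = device
            if dtype is not None:
                kwargs["dtype"] = dtype
            return old_linear(in_features, out_features, bias=bias, **kwargs)
        torch.nn.Linear = patched
        try:
            yield
        finally:
            torch.nn.Linear = old_linear

    with force_linear_defaults(device="cpu", dtype=torch.float16):
        layer = torch.nn.Linear(1280, 1280)   # created as fp16; no fp32 copy ever exists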
--- comfy/ops.py | 13 +++++++++++-- comfy/sd.py | 7 +++++-- comfy/sd1_clip.py | 6 ++++-- comfy/sd2_clip.py | 4 ++-- comfy/sdxl_clip.py | 14 +++++++------- 5 files changed, 29 insertions(+), 15 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 678c2c6d0..610d54584 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -28,9 +28,18 @@ def conv_nd(dims, *args, **kwargs): raise ValueError(f"unsupported dimensions: {dims}") @contextmanager -def use_comfy_ops(): # Kind of an ugly hack but I can't think of a better way +def use_comfy_ops(device=None, dtype=None): # Kind of an ugly hack but I can't think of a better way old_torch_nn_linear = torch.nn.Linear - torch.nn.Linear = Linear + force_device = device + force_dtype = dtype + def linear_with_dtype(in_features: int, out_features: int, bias: bool = True, device=None, dtype=None): + if force_device is not None: + device = force_device + if force_dtype is not None: + dtype = force_dtype + return Linear(in_features, out_features, bias=bias, device=device, dtype=dtype) + + torch.nn.Linear = linear_with_dtype try: yield finally: diff --git a/comfy/sd.py b/comfy/sd.py index 7de72d37c..2a593fbc6 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -545,9 +545,12 @@ class CLIP: load_device = model_management.text_encoder_device() offload_device = model_management.text_encoder_offload_device() params['device'] = load_device - self.cond_stage_model = clip(**(params)) if model_management.should_use_fp16(load_device): - self.cond_stage_model.half() + params['dtype'] = torch.float16 + else: + params['dtype'] = torch.float32 + + self.cond_stage_model = clip(**(params)) self.tokenizer = tokenizer(embedding_directory=embedding_directory) self.patcher = ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index c699214af..4a4e0fe30 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -43,7 +43,7 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): "hidden" ] def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77, - freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None): # clip-vit-base-patch32 + freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None): # clip-vit-base-patch32 super().__init__() assert layer in self.LAYERS self.num_layers = 12 @@ -54,10 +54,12 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json") config = CLIPTextConfig.from_json_file(textmodel_json_config) self.num_layers = config.num_hidden_layers - with comfy.ops.use_comfy_ops(): + with comfy.ops.use_comfy_ops(device, dtype): with modeling_utils.no_init_weights(): self.transformer = CLIPTextModel(config) + if dtype is not None: + self.transformer.to(dtype) self.max_length = max_length if freeze: self.freeze() diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index 1ffe31b62..818c9711e 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -3,13 +3,13 @@ import torch import os class SD2ClipModel(sd1_clip.SD1ClipModel): - def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None): + def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): if layer == "penultimate": layer="hidden" layer_idx=23 
textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path) + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) self.empty_tokens = [[49406] + [49407] + [0] * 75] def clip_layer(self, layer_idx): diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index 65d2bb20d..d05c0a9b9 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -3,13 +3,13 @@ import torch import os class SDXLClipG(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None): + def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): if layer == "penultimate": layer="hidden" layer_idx=-2 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path) + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) self.empty_tokens = [[49406] + [49407] + [0] * 75] self.text_projection = torch.nn.Parameter(torch.empty(1280, 1280)) self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055)) @@ -42,11 +42,11 @@ class SDXLTokenizer(sd1_clip.SD1Tokenizer): return self.clip_g.untokenize(token_weight_pair) class SDXLClipModel(torch.nn.Module): - def __init__(self, device="cpu"): + def __init__(self, device="cpu", dtype=None): super().__init__() - self.clip_l = sd1_clip.SD1ClipModel(layer="hidden", layer_idx=11, device=device) + self.clip_l = sd1_clip.SD1ClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype) self.clip_l.layer_norm_hidden_state = False - self.clip_g = SDXLClipG(device=device) + self.clip_g = SDXLClipG(device=device, dtype=dtype) def clip_layer(self, layer_idx): self.clip_l.clip_layer(layer_idx) @@ -70,9 +70,9 @@ class SDXLClipModel(torch.nn.Module): return self.clip_l.load_sd(sd) class SDXLRefinerClipModel(torch.nn.Module): - def __init__(self, device="cpu"): + def __init__(self, device="cpu", dtype=None): super().__init__() - self.clip_g = SDXLClipG(device=device) + self.clip_g = SDXLClipG(device=device, dtype=dtype) def clip_layer(self, layer_idx): self.clip_g.clip_layer(layer_idx) From a6ef08a46a38063a0be46053b9ec39e9411f632e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 23 Aug 2023 21:38:28 -0400 Subject: [PATCH 24/26] Even with forced fp16 the cpu device should never use it. 
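FORCE_FP16 (the --force-fp16 flag) used to take priority over every device check, so a text encoder placed on the CPU could be asked to run in fp16. The fix is purely guard ordering; sketched (names as in comfy/model_management.py, remaining checks elided):

    def should_use_fp16(device=None, model_params=0):
        # The CPU check now comes first: CPU should never run fp16,
        # even when fp16 is forced on the command line.
        if device is not None and is_device_cpu(device):
            return False
        if FORCE_FP16:
            return True
        if device is not None:  # TODO
            if is_device_mps(device):
                return False
        ...  # remaining capability checks unchanged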
--- comfy/model_management.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 9c100144e..83053ea4c 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -573,11 +573,15 @@ def should_use_fp16(device=None, model_params=0): global xpu_available global directml_enabled + if device is not None: + if is_device_cpu(device): + return False + if FORCE_FP16: return True if device is not None: #TODO - if is_device_cpu(device) or is_device_mps(device): + if is_device_mps(device): return False if FORCE_FP32: From cc44ade79e458e0c0e267ab42c58a7cb6093c535 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 23 Aug 2023 21:45:00 -0400 Subject: [PATCH 25/26] Always shift text encoder to GPU when the device supports fp16. --- comfy/model_management.py | 7 +++---- comfy/sd.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 83053ea4c..544a945b3 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -432,8 +432,7 @@ def text_encoder_device(): if args.gpu_only: return get_torch_device() elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM: - #NOTE: on a Ryzen 5 7600X with 4080 it's faster to shift to GPU - if should_use_fp16() or torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough. + if should_use_fp16(prioritize_performance=False): return get_torch_device() else: return torch.device("cpu") @@ -569,7 +568,7 @@ def is_device_mps(device): return True return False -def should_use_fp16(device=None, model_params=0): +def should_use_fp16(device=None, model_params=0, prioritize_performance=True): global xpu_available global directml_enabled @@ -614,7 +613,7 @@ def should_use_fp16(device=None, model_params=0): if fp16_works: free_model_memory = (get_free_memory() * 0.9 - minimum_inference_memory()) - if model_params * 4 > free_model_memory: + if (not prioritize_performance) or model_params * 4 > free_model_memory: return True if props.major < 7: diff --git a/comfy/sd.py b/comfy/sd.py index 2a593fbc6..89df5a777 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -545,7 +545,7 @@ class CLIP: load_device = model_management.text_encoder_device() offload_device = model_management.text_encoder_offload_device() params['device'] = load_device - if model_management.should_use_fp16(load_device): + if model_management.should_use_fp16(load_device, prioritize_performance=False): params['dtype'] = torch.float16 else: params['dtype'] = torch.float32 From e3d0a9a490195911d0c200f8dbde2991b7421678 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 24 Aug 2023 00:54:16 -0400 Subject: [PATCH 26/26] Fix potential issue with text projection matrix multiplication. --- comfy/sd1_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 4a4e0fe30..4616ca4e9 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -156,7 +156,7 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): pooled_output = outputs.pooler_output if self.text_projection is not None: - pooled_output = pooled_output.to(self.text_projection.device) @ self.text_projection + pooled_output = pooled_output.float().to(self.text_projection.device) @ self.text_projection.float() return z.float(), pooled_output.float() def encode(self, tokens):
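With the text encoder weights now stored in fp16 (patches 22 and 23), pooled_output and self.text_projection can end up with different dtypes, and torch refuses mixed-dtype matmuls. Upcasting both operands keeps the projection in fp32 regardless of how the weights are stored; a small illustration (hypothetical tensors, using CLIP-G's 1280-dim pooled output):

    import torch

    pooled = torch.randn(1, 1280, dtype=torch.float16)  # fp16 encoder output
    proj = torch.randn(1280, 1280)                       # fp32 projection matrix
    # pooled @ proj raises a dtype-mismatch RuntimeError; upcast first:
    out = pooled.float() @ proj.float()                  # matmul runs in fp32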