From 1ea4d846911e938f857460de3eb7893011d6a8c4 Mon Sep 17 00:00:00 2001
From: asagi4 <130366179+asagi4@users.noreply.github.com>
Date: Thu, 27 Jul 2023 21:14:09 +0300
Subject: [PATCH 01/46] Fix timestep ranges when batch_size > 1

---
 comfy/samplers.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index e059374d3..044d518a5 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -19,11 +19,11 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
             strength = 1.0
             if 'timestep_start' in cond[1]:
                 timestep_start = cond[1]['timestep_start']
-                if timestep_in > timestep_start:
+                if timestep_in[0] > timestep_start:
                     return None
             if 'timestep_end' in cond[1]:
                 timestep_end = cond[1]['timestep_end']
-                if timestep_in < timestep_end:
+                if timestep_in[0] < timestep_end:
                     return None
             if 'area' in cond[1]:
                 area = cond[1]['area']
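One note on the change above (an editorial sketch, not part of the patch): `timestep_in` holds one sigma per batch element, so with `batch_size > 1` the old scalar comparison produces a multi-element boolean tensor that Python cannot coerce to a single bool. A minimal reproduction, assuming a two-element batch:

```python
import torch

timestep_in = torch.tensor([0.8, 0.8])  # one sigma per batch element
timestep_start = 0.9

# if timestep_in > timestep_start:  raises "Boolean value of Tensor with
# more than one element is ambiguous" once the batch has more than one item
if timestep_in[0] > timestep_start:  # the patched form compares one element
    print("conditioning disabled for this timestep range")
```

Indexing element 0 is safe because every element of the batch carries the same sigma at a given step.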
From 68be24eead8ad54ee0bbebc234e45472128bfef9 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 27 Jul 2023 16:12:43 -0400
Subject: [PATCH 02/46] Remove some prints.

---
 comfy/model_base.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/comfy/model_base.py b/comfy/model_base.py
index 2d2d35814..d35f02a5b 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -164,7 +164,6 @@ class SDXLRefiner(BaseModel):
         else:
             aesthetic_score = kwargs.get("aesthetic_score", 6)

-        print(clip_pooled.shape, width, height, crop_w, crop_h, aesthetic_score)
         out = []
         out.append(self.embedder(torch.Tensor([height])))
         out.append(self.embedder(torch.Tensor([width])))
@@ -188,7 +187,6 @@ class SDXL(BaseModel):
         target_width = kwargs.get("target_width", width)
         target_height = kwargs.get("target_height", height)

-        print(clip_pooled.shape, width, height, crop_w, crop_h, target_width, target_height)
         out = []
         out.append(self.embedder(torch.Tensor([height])))
         out.append(self.embedder(torch.Tensor([width])))

From 1141029a4ace2e21a7787a8ca6b3bf65cb6457bd Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 28 Jul 2023 12:31:41 -0400
Subject: [PATCH 03/46] Add --disable-metadata argument to disable saving
 metadata in files.

---
 comfy/cli_args.py                   |  2 ++
 comfy_extras/nodes_model_merging.py | 13 ++++++++-----
 nodes.py                            | 26 ++++++++++++++++----------
 3 files changed, 26 insertions(+), 15 deletions(-)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index dc1597d88..83d8cd287 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -84,6 +84,8 @@ parser.add_argument("--dont-print-server", action="store_true", help="Don't prin
 parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
 parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).")

+parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.")
+
 args = parser.parse_args()

 if args.windows_standalone_build:
diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py
index 6146c4500..bce4b3dd0 100644
--- a/comfy_extras/nodes_model_merging.py
+++ b/comfy_extras/nodes_model_merging.py
@@ -6,6 +6,8 @@ import folder_paths
 import json
 import os

+from comfy.cli_args import args
+
 class ModelMergeSimple:
     @classmethod
     def INPUT_TYPES(s):
@@ -101,8 +103,7 @@ class CheckpointSave:
         if prompt is not None:
             prompt_info = json.dumps(prompt)

-        metadata = {"prompt": prompt_info}
-
+        metadata = {}
         enable_modelspec = True
         if isinstance(model.model, comfy.model_base.SDXL):
@@ -127,9 +128,11 @@ class CheckpointSave:
         elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION:
             metadata["modelspec.predict_key"] = "v"

-        if extra_pnginfo is not None:
-            for x in extra_pnginfo:
-                metadata[x] = json.dumps(extra_pnginfo[x])
+        if not args.disable_metadata:
+            metadata["prompt"] = prompt_info
+            if extra_pnginfo is not None:
+                for x in extra_pnginfo:
+                    metadata[x] = json.dumps(extra_pnginfo[x])

         output_checkpoint = f"{filename}_{counter:05}_.safetensors"
         output_checkpoint = os.path.join(full_output_folder, output_checkpoint)
diff --git a/nodes.py b/nodes.py
index b0d05f0d7..240619ed1 100644
--- a/nodes.py
+++ b/nodes.py
@@ -26,6 +26,8 @@ import comfy.utils
 import comfy.clip_vision
 import comfy.model_management

+from comfy.cli_args import args
+
 import importlib

 import folder_paths
@@ -352,10 +354,12 @@ class SaveLatent:
         if prompt is not None:
             prompt_info = json.dumps(prompt)

-        metadata = {"prompt": prompt_info}
-        if extra_pnginfo is not None:
-            for x in extra_pnginfo:
-                metadata[x] = json.dumps(extra_pnginfo[x])
+        metadata = None
+        if not args.disable_metadata:
+            metadata = {"prompt": prompt_info}
+            if extra_pnginfo is not None:
+                for x in extra_pnginfo:
+                    metadata[x] = json.dumps(extra_pnginfo[x])

         file = f"{filename}_{counter:05}_.latent"
         file = os.path.join(full_output_folder, file)
@@ -1214,12 +1218,14 @@ class SaveImage:
         for image in images:
             i = 255. * image.cpu().numpy()
             img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
-            metadata = PngInfo()
-            if prompt is not None:
-                metadata.add_text("prompt", json.dumps(prompt))
-            if extra_pnginfo is not None:
-                for x in extra_pnginfo:
-                    metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+            metadata = None
+            if not args.disable_metadata:
+                metadata = PngInfo()
+                if prompt is not None:
+                    metadata.add_text("prompt", json.dumps(prompt))
+                if extra_pnginfo is not None:
+                    for x in extra_pnginfo:
+                        metadata.add_text(x, json.dumps(extra_pnginfo[x]))

             file = f"{filename}_{counter:05}_.png"
             img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
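Context for what this flag suppresses: `SaveImage` embeds the prompt as PNG text chunks through `PngInfo`, so a workflow can be recovered from an output file. A small sketch of reading that metadata back with Pillow (the filename is hypothetical, and this assumes the image was saved without `--disable-metadata`):

```python
import json
from PIL import Image

img = Image.open("ComfyUI_00001_.png")  # hypothetical output file
prompt_json = img.info.get("prompt")    # PNG text chunks land in .info
if prompt_json is not None:
    prompt = json.loads(prompt_json)    # the same JSON the server executed
    print(list(prompt.keys()))
```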
From c910b4a01ca58b04e5d4ab4c747680b996ada02b Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 28 Jul 2023 21:32:27 -0400
Subject: [PATCH 04/46] Remove unused code and torchdiffeq dependency.

---
 comfy/k_diffusion/sampling.py | 25 -------------------------
 requirements.txt              |  1 -
 2 files changed, 26 deletions(-)

diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py
index 3b4e99315..dd234435f 100644
--- a/comfy/k_diffusion/sampling.py
+++ b/comfy/k_diffusion/sampling.py
@@ -3,7 +3,6 @@ import math
 from scipy import integrate
 import torch
 from torch import nn
-from torchdiffeq import odeint
 import torchsde
 from tqdm.auto import trange, tqdm

@@ -287,30 +286,6 @@ def sample_lms(model, x, sigmas, extra_args=None, callback=None, disable=None, o
     return x


-@torch.no_grad()
-def log_likelihood(model, x, sigma_min, sigma_max, extra_args=None, atol=1e-4, rtol=1e-4):
-    extra_args = {} if extra_args is None else extra_args
-    s_in = x.new_ones([x.shape[0]])
-    v = torch.randint_like(x, 2) * 2 - 1
-    fevals = 0
-    def ode_fn(sigma, x):
-        nonlocal fevals
-        with torch.enable_grad():
-            x = x[0].detach().requires_grad_()
-            denoised = model(x, sigma * s_in, **extra_args)
-            d = to_d(x, sigma, denoised)
-            fevals += 1
-            grad = torch.autograd.grad((d * v).sum(), x)[0]
-            d_ll = (v * grad).flatten(1).sum(1)
-        return d.detach(), d_ll
-    x_min = x, x.new_zeros([x.shape[0]])
-    t = x.new_tensor([sigma_min, sigma_max])
-    sol = odeint(ode_fn, x_min, t, atol=atol, rtol=rtol, method='dopri5')
-    latent, delta_ll = sol[0][-1], sol[1][-1]
-    ll_prior = torch.distributions.Normal(0, sigma_max).log_prob(latent).flatten(1).sum(1)
-    return ll_prior + delta_ll, {'fevals': fevals}
-
-
 class PIDStepSizeController:
     """A PID controller for ODE adaptive step size control."""
     def __init__(self, h, pcoeff, icoeff, dcoeff, order=1, accept_safety=0.81, eps=1e-8):
diff --git a/requirements.txt b/requirements.txt
index d632edf79..8ee7b83d1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,4 @@
 torch
-torchdiffeq
 torchsde
 einops
 transformers>=4.25.1
From d3d9ad00d882f205a1fe7a04b6d58ebd499f48a0 Mon Sep 17 00:00:00 2001
From: Guillaume Faguet
Date: Sat, 29 Jul 2023 14:48:00 +0200
Subject: [PATCH 05/46] added slider and toggle widget

---
 web/scripts/widgets.js | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index a8afc29b0..7b5f9c6b3 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -273,6 +273,33 @@ export const ComfyWidgets = {
 			),
 		};
 	},
+	SLIDER(node, inputName, inputData) {
+		const { val, config } = getNumberDefaults(inputData, 1);
+		Object.assign(config, { precision: 0 });
+		return {
+			widget: node.addWidget(
+				"slider",
+				inputName,
+				val,
+				function (v) {
+					const s = this.options.step / 10;
+					this.value = Math.round(v / s) * s;
+				},
+				config
+			),
+		};
+	},
+	TOGGLE(node, inputName, inputData) {
+		let defaultVal = inputData[1]["default"];
+		return {
+			widget: node.addWidget(
+				"toggle",
+				inputName,
+				defaultVal,
+				() => {},
+			)
+		};
+	},
 	STRING(node, inputName, inputData, app) {
 		const defaultVal = inputData[1].default || "";
 		const multiline = !!inputData[1].multiline;

From ad5866b02bce078411b04abab678d1638c43653a Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 29 Jul 2023 14:48:29 -0400
Subject: [PATCH 06/46] Fix ROCm nightly install command.

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index ad85d3d49..f62d4289a 100644
--- a/README.md
+++ b/README.md
@@ -94,7 +94,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins
 ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```

 This is the command to install the nightly with ROCm 5.6 that supports the 7000 series and might have some performance improvements:
-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.6 -r requirements.txt```
+```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.6```

 ### NVIDIA
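For reference, this is roughly how a node would opt into the widgets added two patches above (a hedged sketch: the server-side contract is only implied by the JS, and this node is hypothetical; patches 13, 14 and 17 later rework `SLIDER` into a `display` option on `INT`/`FLOAT` and rename `TOGGLE` to `BOOLEAN`):

```python
class ExampleNode:  # hypothetical node, for illustration only
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            # the type name selects the matching entry in ComfyWidgets
            "strength": ("SLIDER", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.1}),
            "enabled": ("TOGGLE", {"default": True}),
        }}
```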
From 4b957a0010832ae6cb9553683ca6bef6272b4ccc Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 29 Jul 2023 14:51:56 -0400
Subject: [PATCH 07/46] Initialize the unet directly on the target device.

---
 comfy/ldm/modules/attention.py                    | 94 +++++++++----------
 .../modules/diffusionmodules/openaimodel.py       | 80 +++++++++------
 comfy/model_base.py                               | 20 ++--
 comfy/sd.py                                       |  3 +-
 comfy/supported_models.py                         |  8 +-
 comfy/supported_models_base.py                    |  8 +-
 6 files changed, 110 insertions(+), 103 deletions(-)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 1379b7704..573cea6ac 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -52,9 +52,9 @@ def init_(tensor):

 # feedforward
 class GEGLU(nn.Module):
-    def __init__(self, dim_in, dim_out, dtype=None):
+    def __init__(self, dim_in, dim_out, dtype=None, device=None):
         super().__init__()
-        self.proj = comfy.ops.Linear(dim_in, dim_out * 2, dtype=dtype)
+        self.proj = comfy.ops.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

     def forward(self, x):
         x, gate = self.proj(x).chunk(2, dim=-1)
@@ -62,19 +62,19 @@ class GEGLU(nn.Module):

 class FeedForward(nn.Module):
-    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None):
+    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None):
         super().__init__()
         inner_dim = int(dim * mult)
         dim_out = default(dim_out, dim)
         project_in = nn.Sequential(
-            comfy.ops.Linear(dim, inner_dim, dtype=dtype),
+            comfy.ops.Linear(dim, inner_dim, dtype=dtype, device=device),
             nn.GELU()
-        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype)
+        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device)

         self.net = nn.Sequential(
             project_in,
             nn.Dropout(dropout),
-            comfy.ops.Linear(inner_dim, dim_out, dtype=dtype)
+            comfy.ops.Linear(inner_dim, dim_out, dtype=dtype, device=device)
         )

     def forward(self, x):
@@ -90,8 +90,8 @@ def zero_module(module):
     return module

-def Normalize(in_channels, dtype=None):
-    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype)
+def Normalize(in_channels, dtype=None, device=None):
+    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)

 class SpatialSelfAttention(nn.Module):
@@ -148,7 +148,7 @@ class SpatialSelfAttention(nn.Module):

 class CrossAttentionBirchSan(nn.Module):
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
+    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
         super().__init__()
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
@@ -156,12 +156,12 @@ class CrossAttentionBirchSan(nn.Module):
         self.scale = dim_head ** -0.5
         self.heads = heads

-        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
+        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

         self.to_out = nn.Sequential(
-            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype),
+            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device),
             nn.Dropout(dropout)
         )

@@ -245,7 +245,7 @@ class CrossAttentionBirchSan(nn.Module):

 class CrossAttentionDoggettx(nn.Module):
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
+    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
         super().__init__()
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
@@ -253,12 +253,12 @@ class CrossAttentionDoggettx(nn.Module):
         self.scale = dim_head ** -0.5
         self.heads = heads

-        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
+        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

         self.to_out = nn.Sequential(
-            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype),
+            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device),
             nn.Dropout(dropout)
         )

@@ -343,7 +343,7 @@ class CrossAttentionDoggettx(nn.Module):
         return self.to_out(r2)

 class CrossAttention(nn.Module):
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
+    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
         super().__init__()
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
@@ -351,12 +351,12 @@ class CrossAttention(nn.Module):
         self.scale = dim_head ** -0.5
         self.heads = heads

-        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
+        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

         self.to_out = nn.Sequential(
-            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype),
+            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device),
             nn.Dropout(dropout)
         )

@@ -399,7 +399,7 @@ class CrossAttention(nn.Module):

 class MemoryEfficientCrossAttention(nn.Module):
     # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None):
+    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None):
         super().__init__()
         print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
               f"{heads} heads.")
@@ -409,11 +409,11 @@ class MemoryEfficientCrossAttention(nn.Module):
         self.heads = heads
         self.dim_head = dim_head

-        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
+        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

-        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype), nn.Dropout(dropout))
+        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
         self.attention_op: Optional[Any] = None

     def forward(self, x, context=None, value=None, mask=None):
@@ -450,7 +450,7 @@ class MemoryEfficientCrossAttention(nn.Module):
         return self.to_out(out)

 class CrossAttentionPytorch(nn.Module):
-    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
+    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
         super().__init__()
         inner_dim = dim_head * heads
         context_dim = default(context_dim, query_dim)
@@ -458,11 +458,11 @@ class CrossAttentionPytorch(nn.Module):
         self.heads = heads
         self.dim_head = dim_head

-        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
-        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
+        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
+        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

-        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype), nn.Dropout(dropout))
+        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
         self.attention_op: Optional[Any] = None

     def forward(self, x, context=None, value=None, mask=None):
@@ -508,17 +508,17 @@ else:

 class BasicTransformerBlock(nn.Module):
     def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
-                 disable_self_attn=False, dtype=None):
+                 disable_self_attn=False, dtype=None, device=None):
         super().__init__()
         self.disable_self_attn = disable_self_attn
         self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
-                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype)  # is a self-attention if not self.disable_self_attn
-        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype)
+                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device)  # is a self-attention if not self.disable_self_attn
+        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device)
         self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
-                              heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype)  # is self-attn if context is none
-        self.norm1 = nn.LayerNorm(dim, dtype=dtype)
-        self.norm2 = nn.LayerNorm(dim, dtype=dtype)
-        self.norm3 = nn.LayerNorm(dim, dtype=dtype)
+                              heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device)  # is self-attn if context is none
+        self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device)
+        self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device)
+        self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device)
         self.checkpoint = checkpoint
         self.n_heads = n_heads
         self.d_head = d_head
@@ -648,34 +648,34 @@ class SpatialTransformer(nn.Module):
     def __init__(self, in_channels, n_heads, d_head,
                  depth=1, dropout=0., context_dim=None,
                  disable_self_attn=False, use_linear=False,
-                 use_checkpoint=True, dtype=None):
+                 use_checkpoint=True, dtype=None, device=None):
         super().__init__()
         if exists(context_dim) and not isinstance(context_dim, list):
             context_dim = [context_dim] * depth
         self.in_channels = in_channels
         inner_dim = n_heads * d_head
-        self.norm = Normalize(in_channels, dtype=dtype)
+        self.norm = Normalize(in_channels, dtype=dtype, device=device)
         if not use_linear:
             self.proj_in = nn.Conv2d(in_channels,
                                      inner_dim,
                                      kernel_size=1,
                                      stride=1,
-                                     padding=0, dtype=dtype)
+                                     padding=0, dtype=dtype, device=device)
         else:
-            self.proj_in = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype)
+            self.proj_in = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype, device=device)

         self.transformer_blocks = nn.ModuleList(
             [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
-                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype)
+                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device)
                 for d in range(depth)]
         )
         if not use_linear:
             self.proj_out = nn.Conv2d(inner_dim,in_channels,
                                       kernel_size=1,
                                       stride=1,
-                                      padding=0, dtype=dtype)
+                                      padding=0, dtype=dtype, device=device)
         else:
-            self.proj_out = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype)
+            self.proj_out = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype, device=device)
         self.use_linear = use_linear

     def forward(self, x, context=None, transformer_options={}):
diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py
index 92f2438ef..40060372e 100644
--- a/comfy/ldm/modules/diffusionmodules/openaimodel.py
+++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py
@@ -111,14 +111,14 @@ class Upsample(nn.Module):
                  upsampling occurs in the inner-two dimensions.
     """

-    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None):
+    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None):
         super().__init__()
         self.channels = channels
         self.out_channels = out_channels or channels
         self.use_conv = use_conv
         self.dims = dims
         if use_conv:
-            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding, dtype=dtype)
+            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding, dtype=dtype, device=device)

     def forward(self, x, output_shape=None):
         assert x.shape[1] == self.channels
@@ -160,7 +160,7 @@ class Downsample(nn.Module):
                  downsampling occurs in the inner-two dimensions.
     """

-    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None):
+    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None):
         super().__init__()
         self.channels = channels
         self.out_channels = out_channels or channels
@@ -169,7 +169,7 @@ class Downsample(nn.Module):
         stride = 2 if dims != 3 else (1, 2, 2)
         if use_conv:
             self.op = conv_nd(
-                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding, dtype=dtype
+                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding, dtype=dtype, device=device
             )
         else:
             assert self.channels == self.out_channels
@@ -208,7 +208,8 @@ class ResBlock(TimestepBlock):
         use_checkpoint=False,
         up=False,
         down=False,
-        dtype=None
+        dtype=None,
+        device=None,
     ):
         super().__init__()
         self.channels = channels
@@ -220,19 +221,19 @@ class ResBlock(TimestepBlock):
         self.use_scale_shift_norm = use_scale_shift_norm

         self.in_layers = nn.Sequential(
-            nn.GroupNorm(32, channels, dtype=dtype),
+            nn.GroupNorm(32, channels, dtype=dtype, device=device),
             nn.SiLU(),
-            conv_nd(dims, channels, self.out_channels, 3, padding=1, dtype=dtype),
+            conv_nd(dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device),
         )

         self.updown = up or down

         if up:
-            self.h_upd = Upsample(channels, False, dims, dtype=dtype)
-            self.x_upd = Upsample(channels, False, dims, dtype=dtype)
+            self.h_upd = Upsample(channels, False, dims, dtype=dtype, device=device)
+            self.x_upd = Upsample(channels, False, dims, dtype=dtype, device=device)
         elif down:
-            self.h_upd = Downsample(channels, False, dims, dtype=dtype)
-            self.x_upd = Downsample(channels, False, dims, dtype=dtype)
+            self.h_upd = Downsample(channels, False, dims, dtype=dtype, device=device)
+            self.x_upd = Downsample(channels, False, dims, dtype=dtype, device=device)
         else:
             self.h_upd = self.x_upd = nn.Identity()

@@ -240,15 +241,15 @@ class ResBlock(TimestepBlock):
             nn.SiLU(),
             linear(
                 emb_channels,
-                2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype
+                2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device
             ),
         )
         self.out_layers = nn.Sequential(
-            nn.GroupNorm(32, self.out_channels, dtype=dtype),
+            nn.GroupNorm(32, self.out_channels, dtype=dtype, device=device),
             nn.SiLU(),
             nn.Dropout(p=dropout),
             zero_module(
-                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1, dtype=dtype)
+                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1, dtype=dtype, device=device)
             ),
         )

@@ -256,10 +257,10 @@ class ResBlock(TimestepBlock):
             self.skip_connection = nn.Identity()
         elif use_conv:
             self.skip_connection = conv_nd(
-                dims, channels, self.out_channels, 3, padding=1, dtype=dtype
+                dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device
             )
         else:
-            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1, dtype=dtype)
+            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1, dtype=dtype, device=device)

     def forward(self, x, emb):
         """
@@ -503,6 +504,7 @@ class UNetModel(nn.Module):
         use_linear_in_transformer=False,
         adm_in_channels=None,
         transformer_depth_middle=None,
+        device=None,
     ):
         super().__init__()
         if use_spatial_transformer:
@@ -564,9 +566,9 @@ class UNetModel(nn.Module):
         time_embed_dim = model_channels * 4
         self.time_embed = nn.Sequential(
-            linear(model_channels, time_embed_dim, dtype=self.dtype),
+            linear(model_channels, time_embed_dim, dtype=self.dtype, device=device),
             nn.SiLU(),
-            linear(time_embed_dim, time_embed_dim, dtype=self.dtype),
+            linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
         )

         if self.num_classes is not None:
@@ -579,9 +581,9 @@ class UNetModel(nn.Module):
                 assert adm_in_channels is not None
                 self.label_emb = nn.Sequential(
                     nn.Sequential(
-                        linear(adm_in_channels, time_embed_dim, dtype=self.dtype),
+                        linear(adm_in_channels, time_embed_dim, dtype=self.dtype, device=device),
                         nn.SiLU(),
-                        linear(time_embed_dim, time_embed_dim, dtype=self.dtype),
+                        linear(time_embed_dim, time_embed_dim, dtype=self.dtype, device=device),
                     )
                 )
             else:
@@ -590,7 +592,7 @@ class UNetModel(nn.Module):
         self.input_blocks = nn.ModuleList(
             [
                 TimestepEmbedSequential(
-                    conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype)
+                    conv_nd(dims, in_channels, model_channels, 3, padding=1, dtype=self.dtype, device=device)
                 )
             ]
         )
@@ -609,7 +611,8 @@ class UNetModel(nn.Module):
                         dims=dims,
                         use_checkpoint=use_checkpoint,
                         use_scale_shift_norm=use_scale_shift_norm,
-                        dtype=self.dtype
+                        dtype=self.dtype,
+                        device=device,
                     )
                 ]
                 ch = mult * model_channels
@@ -638,7 +641,7 @@ class UNetModel(nn.Module):
                         ) if not use_spatial_transformer else SpatialTransformer(
                             ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
                             disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
-                            use_checkpoint=use_checkpoint, dtype=self.dtype
+                            use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
                         )
                     )
                 self.input_blocks.append(TimestepEmbedSequential(*layers))
@@ -657,11 +660,12 @@ class UNetModel(nn.Module):
                             use_checkpoint=use_checkpoint,
                             use_scale_shift_norm=use_scale_shift_norm,
                             down=True,
-                            dtype=self.dtype
+                            dtype=self.dtype,
+                            device=device,
                         )
                         if resblock_updown
                         else Downsample(
-                            ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype
+                            ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device
                         )
                     )
                 )
@@ -686,7 +690,8 @@ class UNetModel(nn.Module):
                 dims=dims,
                 use_checkpoint=use_checkpoint,
                 use_scale_shift_norm=use_scale_shift_norm,
-                dtype=self.dtype
+                dtype=self.dtype,
+                device=device,
             ),
             AttentionBlock(
                 ch,
@@ -697,7 +702,7 @@ class UNetModel(nn.Module):
            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
                             ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
                             disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
-                            use_checkpoint=use_checkpoint, dtype=self.dtype
+                            use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
                         ),
             ResBlock(
                 ch,
@@ -706,7 +711,8 @@ class UNetModel(nn.Module):
                 dims=dims,
                 use_checkpoint=use_checkpoint,
                 use_scale_shift_norm=use_scale_shift_norm,
-                dtype=self.dtype
+                dtype=self.dtype,
+                device=device,
             ),
         )
         self._feature_size += ch
@@ -724,7 +730,8 @@ class UNetModel(nn.Module):
                         dims=dims,
                         use_checkpoint=use_checkpoint,
                         use_scale_shift_norm=use_scale_shift_norm,
-                        dtype=self.dtype
+                        dtype=self.dtype,
+                        device=device,
                     )
                 ]
                 ch = model_channels * mult
@@ -753,7 +760,7 @@ class UNetModel(nn.Module):
                         ) if not use_spatial_transformer else SpatialTransformer(
                             ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
                             disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
-                            use_checkpoint=use_checkpoint, dtype=self.dtype
+                            use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
                         )
                     )
                 if level and i == self.num_res_blocks[level]:
@@ -768,24 +775,25 @@ class UNetModel(nn.Module):
                             use_checkpoint=use_checkpoint,
                             use_scale_shift_norm=use_scale_shift_norm,
                             up=True,
-                            dtype=self.dtype
+                            dtype=self.dtype,
+                            device=device,
                         )
                         if resblock_updown
-                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype)
+                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device)
                     )
                     ds //= 2
                 self.output_blocks.append(TimestepEmbedSequential(*layers))
                 self._feature_size += ch

         self.out = nn.Sequential(
-            nn.GroupNorm(32, ch, dtype=self.dtype),
+            nn.GroupNorm(32, ch, dtype=self.dtype, device=device),
             nn.SiLU(),
-            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1, dtype=self.dtype)),
+            zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1, dtype=self.dtype, device=device)),
         )
         if self.predict_codebook_ids:
             self.id_predictor = nn.Sequential(
-                nn.GroupNorm(32, ch, dtype=self.dtype),
-                conv_nd(dims, model_channels, n_embed, 1),
+                nn.GroupNorm(32, ch, dtype=self.dtype, device=device),
+                conv_nd(dims, model_channels, n_embed, 1, dtype=self.dtype, device=device),
                 #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
             )
diff --git a/comfy/model_base.py b/comfy/model_base.py
index d35f02a5b..bf6983fc2 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -12,14 +12,14 @@ class ModelType(Enum):
     V_PREDICTION = 2

 class BaseModel(torch.nn.Module):
-    def __init__(self, model_config, model_type=ModelType.EPS):
+    def __init__(self, model_config, model_type=ModelType.EPS, device=None):
         super().__init__()

         unet_config = model_config.unet_config
         self.latent_format = model_config.latent_format
         self.model_config = model_config
         self.register_schedule(given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3)
-        self.diffusion_model = UNetModel(**unet_config)
+        self.diffusion_model = UNetModel(**unet_config, device=device)
         self.model_type = model_type
         self.adm_channels = unet_config.get("adm_in_channels", None)
         if self.adm_channels is None:
@@ -107,8 +107,8 @@ class BaseModel(torch.nn.Module):

 class SD21UNCLIP(BaseModel):
-    def __init__(self, model_config, noise_aug_config, model_type=ModelType.V_PREDICTION):
-        super().__init__(model_config, model_type)
+    def __init__(self, model_config, noise_aug_config, model_type=ModelType.V_PREDICTION, device=None):
+        super().__init__(model_config, model_type, device=device)
         self.noise_augmentor = CLIPEmbeddingNoiseAugmentation(**noise_aug_config)

     def encode_adm(self, **kwargs):
@@ -143,13 +143,13 @@ class SD21UNCLIP(BaseModel):
         return adm_out

 class SDInpaint(BaseModel):
-    def __init__(self, model_config, model_type=ModelType.EPS):
-        super().__init__(model_config, model_type)
+    def __init__(self, model_config, model_type=ModelType.EPS, device=None):
+        super().__init__(model_config, model_type, device=device)
         self.concat_keys = ("mask", "masked_image")

 class SDXLRefiner(BaseModel):
-    def __init__(self, model_config, model_type=ModelType.EPS):
-        super().__init__(model_config, model_type)
+    def __init__(self, model_config, model_type=ModelType.EPS, device=None):
+        super().__init__(model_config, model_type, device=device)
         self.embedder = Timestep(256)

     def encode_adm(self, **kwargs):
@@ -174,8 +174,8 @@ class SDXLRefiner(BaseModel):
         return torch.cat((clip_pooled.to(flat.device), flat), dim=1)

 class SDXL(BaseModel):
-    def __init__(self, model_config, model_type=ModelType.EPS):
-        super().__init__(model_config, model_type)
+    def __init__(self, model_config, model_type=ModelType.EPS, device=None):
+        super().__init__(model_config, model_type, device=device)
         self.embedder = Timestep(256)

     def encode_adm(self, **kwargs):
diff --git a/comfy/sd.py b/comfy/sd.py
index 70701ab6b..922cbf21e 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -1169,8 +1169,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o
         clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True)

     offload_device = model_management.unet_offload_device()
-    model = model_config.get_model(sd, "model.diffusion_model.")
-    model = model.to(offload_device)
+    model = model_config.get_model(sd, "model.diffusion_model.", device=offload_device)
     model.load_model_weights(sd, "model.diffusion_model.")

     if output_vae:
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index b1c01fe87..95fc8f3f5 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -109,8 +109,8 @@ class SDXLRefiner(supported_models_base.BASE):

     latent_format = latent_formats.SDXL

-    def get_model(self, state_dict, prefix=""):
-        return model_base.SDXLRefiner(self)
+    def get_model(self, state_dict, prefix="", device=None):
+        return model_base.SDXLRefiner(self, device=device)

     def process_clip_state_dict(self, state_dict):
         keys_to_replace = {}
@@ -152,8 +152,8 @@ class SDXL(supported_models_base.BASE):
         else:
             return model_base.ModelType.EPS

-    def get_model(self, state_dict, prefix=""):
-        return model_base.SDXL(self, model_type=self.model_type(state_dict, prefix))
+    def get_model(self, state_dict, prefix="", device=None):
+        return model_base.SDXL(self, model_type=self.model_type(state_dict, prefix), device=device)

     def process_clip_state_dict(self, state_dict):
         keys_to_replace = {}
diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py
index c5db66274..d0088bbd5 100644
--- a/comfy/supported_models_base.py
+++ b/comfy/supported_models_base.py
@@ -53,13 +53,13 @@ class BASE:
         for x in self.unet_extra_config:
             self.unet_config[x] = self.unet_extra_config[x]

-    def get_model(self, state_dict, prefix=""):
+    def get_model(self, state_dict, prefix="", device=None):
         if self.inpaint_model():
-            return model_base.SDInpaint(self, model_type=self.model_type(state_dict, prefix))
+            return model_base.SDInpaint(self, model_type=self.model_type(state_dict, prefix), device=device)
         elif self.noise_aug_config is not None:
-            return model_base.SD21UNCLIP(self, self.noise_aug_config, model_type=self.model_type(state_dict, prefix))
+            return model_base.SD21UNCLIP(self, self.noise_aug_config, model_type=self.model_type(state_dict, prefix), device=device)
         else:
-            return model_base.BaseModel(self, model_type=self.model_type(state_dict, prefix))
+            return model_base.BaseModel(self, model_type=self.model_type(state_dict, prefix), device=device)

     def process_clip_state_dict(self, state_dict):
         return state_dict
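The pattern this patch threads through the whole stack, shown in isolation (a sketch; the `device=`/`dtype=` factory kwargs exist on stock `torch.nn` modules in recent PyTorch versions):

```python
import torch
import torch.nn as nn

# Allocate weights directly on the target device, instead of initializing
# them on the CPU and paying for a full copy in a later .to() call.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
layer = nn.Linear(4096, 4096, dtype=torch.float16, device=device)
```

Passing `device` down through every constructor, as the diff does, is what makes this work for a module tree as deep as the UNet.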
From 95d796fc85608272a9bf06a8c6c1f45912179118 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sat, 29 Jul 2023 16:28:30 -0400
Subject: [PATCH 08/46] Faster VAE loading.

---
 comfy/ldm/modules/diffusionmodules/model.py | 55 +++++++++++----------
 1 file changed, 28 insertions(+), 27 deletions(-)

diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 69ab21cdc..b596408d3 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -8,6 +8,7 @@ from typing import Optional, Any
 from ..attention import MemoryEfficientCrossAttention
 from comfy import model_management
+import comfy.ops

 if model_management.xformers_enabled_vae():
     import xformers
@@ -48,7 +49,7 @@ class Upsample(nn.Module):
         super().__init__()
         self.with_conv = with_conv
         if self.with_conv:
-            self.conv = torch.nn.Conv2d(in_channels,
+            self.conv = comfy.ops.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=3,
                                         stride=1,
@@ -67,7 +68,7 @@ class Downsample(nn.Module):
         self.with_conv = with_conv
         if self.with_conv:
             # no asymmetric padding in torch conv, must do it ourselves
-            self.conv = torch.nn.Conv2d(in_channels,
+            self.conv = comfy.ops.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=3,
                                         stride=2,
@@ -95,30 +96,30 @@ class ResnetBlock(nn.Module):
         self.swish = torch.nn.SiLU(inplace=True)
         self.norm1 = Normalize(in_channels)
-        self.conv1 = torch.nn.Conv2d(in_channels,
+        self.conv1 = comfy.ops.Conv2d(in_channels,
                                      out_channels,
                                      kernel_size=3,
                                      stride=1,
                                      padding=1)
         if temb_channels > 0:
-            self.temb_proj = torch.nn.Linear(temb_channels,
+            self.temb_proj = comfy.ops.Linear(temb_channels,
                                              out_channels)
         self.norm2 = Normalize(out_channels)
         self.dropout = torch.nn.Dropout(dropout, inplace=True)
-        self.conv2 = torch.nn.Conv2d(out_channels,
+        self.conv2 = comfy.ops.Conv2d(out_channels,
                                      out_channels,
                                      kernel_size=3,
                                      stride=1,
                                      padding=1)
         if self.in_channels != self.out_channels:
             if self.use_conv_shortcut:
-                self.conv_shortcut = torch.nn.Conv2d(in_channels,
+                self.conv_shortcut = comfy.ops.Conv2d(in_channels,
                                                      out_channels,
                                                      kernel_size=3,
                                                      stride=1,
                                                      padding=1)
             else:
-                self.nin_shortcut = torch.nn.Conv2d(in_channels,
+                self.nin_shortcut = comfy.ops.Conv2d(in_channels,
                                                     out_channels,
                                                     kernel_size=1,
                                                     stride=1,
@@ -188,22 +189,22 @@ class AttnBlock(nn.Module):
         self.in_channels = in_channels

         self.norm = Normalize(in_channels)
-        self.q = torch.nn.Conv2d(in_channels,
+        self.q = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.k = torch.nn.Conv2d(in_channels,
+        self.k = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.v = torch.nn.Conv2d(in_channels,
+        self.v = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.proj_out = torch.nn.Conv2d(in_channels,
+        self.proj_out = comfy.ops.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
@@ -243,22 +244,22 @@ class MemoryEfficientAttnBlock(nn.Module):
         self.in_channels = in_channels

         self.norm = Normalize(in_channels)
-        self.q = torch.nn.Conv2d(in_channels,
+        self.q = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.k = torch.nn.Conv2d(in_channels,
+        self.k = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.v = torch.nn.Conv2d(in_channels,
+        self.v = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.proj_out = torch.nn.Conv2d(in_channels,
+        self.proj_out = comfy.ops.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
@@ -302,22 +303,22 @@ class MemoryEfficientAttnBlockPytorch(nn.Module):
         self.in_channels = in_channels

         self.norm = Normalize(in_channels)
-        self.q = torch.nn.Conv2d(in_channels,
+        self.q = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.k = torch.nn.Conv2d(in_channels,
+        self.k = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.v = torch.nn.Conv2d(in_channels,
+        self.v = comfy.ops.Conv2d(in_channels,
                                  in_channels,
                                  kernel_size=1,
                                  stride=1,
                                  padding=0)
-        self.proj_out = torch.nn.Conv2d(in_channels,
+        self.proj_out = comfy.ops.Conv2d(in_channels,
                                         in_channels,
                                         kernel_size=1,
                                         stride=1,
@@ -399,14 +400,14 @@ class Model(nn.Module):
         # timestep embedding
         self.temb = nn.Module()
         self.temb.dense = nn.ModuleList([
-            torch.nn.Linear(self.ch,
+            comfy.ops.Linear(self.ch,
                             self.temb_ch),
-            torch.nn.Linear(self.temb_ch,
+            comfy.ops.Linear(self.temb_ch,
                             self.temb_ch),
         ])

         # downsampling
-        self.conv_in = torch.nn.Conv2d(in_channels,
+        self.conv_in = comfy.ops.Conv2d(in_channels,
                                        self.ch,
                                        kernel_size=3,
                                        stride=1,
@@ -475,7 +476,7 @@ class Model(nn.Module):
         # end
         self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
+        self.conv_out = comfy.ops.Conv2d(block_in,
                                         out_ch,
                                         kernel_size=3,
                                         stride=1,
@@ -548,7 +549,7 @@ class Encoder(nn.Module):
         self.in_channels = in_channels

         # downsampling
-        self.conv_in = torch.nn.Conv2d(in_channels,
+        self.conv_in = comfy.ops.Conv2d(in_channels,
                                        self.ch,
                                        kernel_size=3,
                                        stride=1,
@@ -593,7 +594,7 @@ class Encoder(nn.Module):
         # end
         self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
+        self.conv_out = comfy.ops.Conv2d(block_in,
                                         2*z_channels if double_z else z_channels,
                                         kernel_size=3,
                                         stride=1,
@@ -653,7 +654,7 @@ class Decoder(nn.Module):
                           self.z_shape, np.prod(self.z_shape)))

         # z to block_in
-        self.conv_in = torch.nn.Conv2d(z_channels,
+        self.conv_in = comfy.ops.Conv2d(z_channels,
                                        block_in,
                                        kernel_size=3,
                                        stride=1,
@@ -695,7 +696,7 @@ class Decoder(nn.Module):
         # end
         self.norm_out = Normalize(block_in)
-        self.conv_out = torch.nn.Conv2d(block_in,
+        self.conv_out = comfy.ops.Conv2d(block_in,
                                         out_ch,
                                         kernel_size=3,
                                         stride=1,
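Why swapping `torch.nn.*` for `comfy.ops.*` makes loading faster (an editorial sketch of the presumed mechanism, not the project's actual source): the checkpoint overwrites every parameter immediately after construction, so the default random weight initialization is wasted work that a thin subclass can skip:

```python
import torch

class Linear(torch.nn.Linear):
    def reset_parameters(self):
        return None  # skip random init; real weights come from the checkpoint

class Conv2d(torch.nn.Conv2d):
    def reset_parameters(self):
        return None
```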
From 2b13939044285adbad700690a501b3b3f11e4e4e Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 30 Jul 2023 14:13:33 -0400
Subject: [PATCH 09/46] Remove some useless code.

---
 comfy/cldm/cldm.py                                 |  19 +-
 .../modules/diffusionmodules/openaimodel.py        | 230 +-----------------
 2 files changed, 8 insertions(+), 241 deletions(-)

diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py
index 2a16c8101..46fbf0a69 100644
--- a/comfy/cldm/cldm.py
+++ b/comfy/cldm/cldm.py
@@ -13,7 +13,7 @@
 )

 from ..ldm.modules.attention import SpatialTransformer
-from ..ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
+from ..ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample
 from ..ldm.util import exists

@@ -57,6 +57,7 @@ class ControlNet(nn.Module):
         transformer_depth_middle=None,
     ):
         super().__init__()
+        assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
         if use_spatial_transformer:
             assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
@@ -200,13 +201,7 @@ class ControlNet(nn.Module):
                 if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
                     layers.append(
-                        AttentionBlock(
-                            ch,
-                            use_checkpoint=use_checkpoint,
-                            num_heads=num_heads,
-                            num_head_channels=dim_head,
-                            use_new_attention_order=use_new_attention_order,
-                        ) if not use_spatial_transformer else SpatialTransformer(
+                        SpatialTransformer(
                             ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
                             disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                             use_checkpoint=use_checkpoint
@@ -259,13 +254,7 @@ class ControlNet(nn.Module):
             use_checkpoint=use_checkpoint,
             use_scale_shift_norm=use_scale_shift_norm,
             ),
-            AttentionBlock(
-                ch,
-                use_checkpoint=use_checkpoint,
-                num_heads=num_heads,
-                num_head_channels=dim_head,
-                use_new_attention_order=use_new_attention_order,
-            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
+            SpatialTransformer(  # always uses a self-attn
                             ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
                             disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                             use_checkpoint=use_checkpoint
diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py
index 40060372e..90c153465 100644
--- a/comfy/ldm/modules/diffusionmodules/openaimodel.py
+++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py
@@ -19,45 +19,6 @@ from ..attention import SpatialTransformer
 from comfy.ldm.util import exists

-# dummy replace
-def convert_module_to_f16(x):
-    pass
-
-def convert_module_to_f32(x):
-    pass
-
-
-## go
-class AttentionPool2d(nn.Module):
-    """
-    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
-    """
-
-    def __init__(
-        self,
-        spacial_dim: int,
-        embed_dim: int,
-        num_heads_channels: int,
-        output_dim: int = None,
-    ):
-        super().__init__()
-        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
-        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
-        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
-        self.num_heads = embed_dim // num_heads_channels
-        self.attention = QKVAttention(self.num_heads)
-
-    def forward(self, x):
-        b, c, *_spatial = x.shape
-        x = x.reshape(b, c, -1)  # NC(HW)
-        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
-        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
-        x = self.qkv_proj(x)
-        x = self.attention(x)
-        x = self.c_proj(x)
-        return x[:, :, 0]
-
-
 class TimestepBlock(nn.Module):
     """
     Any module where forward() takes timestep embeddings as a second argument.
@@ -138,19 +99,6 @@ class Upsample(nn.Module):
         x = self.conv(x)
         return x

-class TransposedUpsample(nn.Module):
-    'Learned 2x upsampling without padding'
-    def __init__(self, channels, out_channels=None, ks=5):
-        super().__init__()
-        self.channels = channels
-        self.out_channels = out_channels or channels
-
-        self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
-
-    def forward(self,x):
-        return self.up(x)
-
-
 class Downsample(nn.Module):
     """
     A downsampling layer with an optional convolution.
@@ -296,142 +244,6 @@ class ResBlock(TimestepBlock):
         h = self.out_layers(h)
         return self.skip_connection(x) + h

-
-class AttentionBlock(nn.Module):
-    """
-    An attention block that allows spatial positions to attend to each other.
-    Originally ported from here, but adapted to the N-d case.
-    https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
-    """
-
-    def __init__(
-        self,
-        channels,
-        num_heads=1,
-        num_head_channels=-1,
-        use_checkpoint=False,
-        use_new_attention_order=False,
-    ):
-        super().__init__()
-        self.channels = channels
-        if num_head_channels == -1:
-            self.num_heads = num_heads
-        else:
-            assert (
-                channels % num_head_channels == 0
-            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
-            self.num_heads = channels // num_head_channels
-        self.use_checkpoint = use_checkpoint
-        self.norm = normalization(channels)
-        self.qkv = conv_nd(1, channels, channels * 3, 1)
-        if use_new_attention_order:
-            # split qkv before split heads
-            self.attention = QKVAttention(self.num_heads)
-        else:
-            # split heads before split qkv
-            self.attention = QKVAttentionLegacy(self.num_heads)
-
-        self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
-
-    def forward(self, x):
-        return checkpoint(self._forward, (x,), self.parameters(), True)  # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
-        #return pt_checkpoint(self._forward, x)  # pytorch
-
-    def _forward(self, x):
-        b, c, *spatial = x.shape
-        x = x.reshape(b, c, -1)
-        qkv = self.qkv(self.norm(x))
-        h = self.attention(qkv)
-        h = self.proj_out(h)
-        return (x + h).reshape(b, c, *spatial)
-
-
-def count_flops_attn(model, _x, y):
-    """
-    A counter for the `thop` package to count the operations in an
-    attention operation.
-    Meant to be used like:
-        macs, params = thop.profile(
-            model,
-            inputs=(inputs, timestamps),
-            custom_ops={QKVAttention: QKVAttention.count_flops},
-        )
-    """
-    b, c, *spatial = y[0].shape
-    num_spatial = int(np.prod(spatial))
-    # We perform two matmuls with the same number of ops.
-    # The first computes the weight matrix, the second computes
-    # the combination of the value vectors.
-    matmul_ops = 2 * b * (num_spatial ** 2) * c
-    model.total_ops += th.DoubleTensor([matmul_ops])
-
-
-class QKVAttentionLegacy(nn.Module):
-    """
-    A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping
-    """
-
-    def __init__(self, n_heads):
-        super().__init__()
-        self.n_heads = n_heads
-
-    def forward(self, qkv):
-        """
-        Apply QKV attention.
-        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
-        :return: an [N x (H * C) x T] tensor after attention.
-        """
-        bs, width, length = qkv.shape
-        assert width % (3 * self.n_heads) == 0
-        ch = width // (3 * self.n_heads)
-        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
-        scale = 1 / math.sqrt(math.sqrt(ch))
-        weight = th.einsum(
-            "bct,bcs->bts", q * scale, k * scale
-        )  # More stable with f16 than dividing afterwards
-        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v)
-        return a.reshape(bs, -1, length)
-
-    @staticmethod
-    def count_flops(model, _x, y):
-        return count_flops_attn(model, _x, y)
-
-
-class QKVAttention(nn.Module):
-    """
-    A module which performs QKV attention and splits in a different order.
-    """
-
-    def __init__(self, n_heads):
-        super().__init__()
-        self.n_heads = n_heads
-
-    def forward(self, qkv):
-        """
-        Apply QKV attention.
-        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
-        :return: an [N x (H * C) x T] tensor after attention.
-        """
-        bs, width, length = qkv.shape
-        assert width % (3 * self.n_heads) == 0
-        ch = width // (3 * self.n_heads)
-        q, k, v = qkv.chunk(3, dim=1)
-        scale = 1 / math.sqrt(math.sqrt(ch))
-        weight = th.einsum(
-            "bct,bcs->bts",
-            (q * scale).view(bs * self.n_heads, ch, length),
-            (k * scale).view(bs * self.n_heads, ch, length),
-        )  # More stable with f16 than dividing afterwards
-        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
-        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
-        return a.reshape(bs, -1, length)
-
-    @staticmethod
-    def count_flops(model, _x, y):
-        return count_flops_attn(model, _x, y)
-
-
 class Timestep(nn.Module):
     def __init__(self, dim):
         super().__init__()
@@ -507,6 +319,7 @@ class UNetModel(nn.Module):
         device=None,
     ):
         super().__init__()
+        assert use_spatial_transformer == True, "use_spatial_transformer has to be true"
         if use_spatial_transformer:
             assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
@@ -631,14 +444,7 @@ class UNetModel(nn.Module):
                         disabled_sa = False

                 if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
-                    layers.append(
-                        AttentionBlock(
-                            ch,
-                            use_checkpoint=use_checkpoint,
-                            num_heads=num_heads,
-                            num_head_channels=dim_head,
-                            use_new_attention_order=use_new_attention_order,
-                        ) if not use_spatial_transformer else SpatialTransformer(
+                    layers.append(SpatialTransformer(
                             ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
                             disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                             use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
@@ -693,13 +499,7 @@ class UNetModel(nn.Module):
                 dtype=self.dtype,
                 device=device,
             ),
-            AttentionBlock(
-                ch,
-                use_checkpoint=use_checkpoint,
-                num_heads=num_heads,
-                num_head_channels=dim_head,
-                use_new_attention_order=use_new_attention_order,
-            ) if not use_spatial_transformer else SpatialTransformer(  # always uses a self-attn
+            SpatialTransformer(  # always uses a self-attn
                             ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim,
                             disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
                             use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
@@ -751,13 +551,7 @@ class UNetModel(nn.Module):

                 if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
                     layers.append(
-                        AttentionBlock(
-                            ch,
-                            use_checkpoint=use_checkpoint,
-                            num_heads=num_heads_upsample,
-                            num_head_channels=dim_head,
-                            use_new_attention_order=use_new_attention_order,
-                        ) if not use_spatial_transformer else SpatialTransformer(
+                        SpatialTransformer(
                             ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim,
                             disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
                             use_checkpoint=use_checkpoint, dtype=self.dtype, device=device
@@ -797,22 +591,6 @@ class UNetModel(nn.Module):
                 #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
             )

-    def convert_to_fp16(self):
-        """
-        Convert the torso of the model to float16.
-        """
-        self.input_blocks.apply(convert_module_to_f16)
-        self.middle_block.apply(convert_module_to_f16)
-        self.output_blocks.apply(convert_module_to_f16)
-
-    def convert_to_fp32(self):
-        """
-        Convert the torso of the model to float32.
-        """
-        self.input_blocks.apply(convert_module_to_f32)
-        self.middle_block.apply(convert_module_to_f32)
-        self.output_blocks.apply(convert_module_to_f32)
-
     def forward(self, x, timesteps=None, context=None, y=None, control=None, transformer_options={}, **kwargs):
         """
         Apply the model to an input batch.

From 3cd31d0e24ef939e08ece7c78d53ebe0f7e0bdbb Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 30 Jul 2023 17:18:24 -0400
Subject: [PATCH 10/46] Lower CPU thread check for running the text encoder on
 the CPU vs GPU.

---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 241706925..75f3b38a9 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -364,7 +364,7 @@ def text_encoder_device():
     if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
-        if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
+        if torch.get_num_threads() < 4: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
             return get_torch_device()
         else:
             return torch.device("cpu")

From 3dcad78fe1506f2440952fcc86f9159446520247 Mon Sep 17 00:00:00 2001
From: FuamiCake
Date: Sun, 30 Jul 2023 16:36:55 -0500
Subject: [PATCH 11/46] SaveLatent reports its outputs so they are visible to
 API

---
 nodes.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/nodes.py b/nodes.py
index 240619ed1..097f92308 100644
--- a/nodes.py
+++ b/nodes.py
@@ -362,6 +362,14 @@ class SaveLatent:
                 metadata[x] = json.dumps(extra_pnginfo[x])

         file = f"{filename}_{counter:05}_.latent"
+
+        results = list()
+        results.append({
+            "filename": file,
+            "subfolder": subfolder,
+            "type": "output"
+        })
+
         file = os.path.join(full_output_folder, file)

         output = {}
@@ -369,7 +377,7 @@ class SaveLatent:
         output["latent_format_version_0"] = torch.tensor([])

         comfy.utils.save_torch_file(output, file, metadata=metadata)
-        return {}
+        return { "ui": { "latents": results } }
From 4a77fcd6ab01d69e18c384faa29ae1c3d02237f3 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 31 Jul 2023 00:08:54 -0400
Subject: [PATCH 12/46] Only shift text encoder to vram when CPU cores are
 under 8.

---
 comfy/model_management.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 75f3b38a9..0ffca06da 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -364,7 +364,8 @@ def text_encoder_device():
     if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
-        if torch.get_num_threads() < 4: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
+        #NOTE: on a Ryzen 5 7600X with 4080 it's faster to shift to GPU
+        if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
             return get_torch_device()
         else:
             return torch.device("cpu")

From 6cdc9afc7cea12adf1c58c7b106abf97f7849641 Mon Sep 17 00:00:00 2001
From: Guillaume Faguet
Date: Mon, 31 Jul 2023 08:48:44 +0200
Subject: [PATCH 13/46] pass slider type as option

---
 web/scripts/widgets.js | 28 +++++++++-------------------
 1 file changed, 9 insertions(+), 19 deletions(-)

diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 7b5f9c6b3..596fef898 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -250,19 +250,25 @@ function addMultilineWidget(node, name, opts, app) {
 	return { minWidth: 400, minHeight: 200, widget };
 }

+function isSlider(display_as) {
+	return (display_as==="slider") ? "slider" : "number"
+}
+
 export const ComfyWidgets = {
 	"INT:seed": seedWidget,
 	"INT:noise_seed": seedWidget,
 	FLOAT(node, inputName, inputData) {
+		let widgetType = isSlider(inputData[1]["display_as"]);
 		const { val, config } = getNumberDefaults(inputData, 0.5);
-		return { widget: node.addWidget("number", inputName, val, () => {}, config) };
+		return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) };
 	},
 	INT(node, inputName, inputData) {
+		let widgetType = isSlider(inputData[1]["display_as"]);
 		const { val, config } = getNumberDefaults(inputData, 1);
 		Object.assign(config, { precision: 0 });
 		return {
 			widget: node.addWidget(
-				"number",
+				widgetType,
 				inputName,
 				val,
 				function (v) {
@@ -270,23 +276,7 @@ export const ComfyWidgets = {
 					this.value = Math.round(v / s) * s;
 				},
 				config
-			),
-		};
-	},
-	SLIDER(node, inputName, inputData) {
-		const { val, config } = getNumberDefaults(inputData, 1);
-		Object.assign(config, { precision: 0 });
-		return {
-			widget: node.addWidget(
-				"slider",
-				inputName,
-				val,
-				function (v) {
-					const s = this.options.step / 10;
-					this.value = Math.round(v / s) * s;
-				},
-				config
-			),
+			), 
 		};
 	},
 	TOGGLE(node, inputName, inputData) {
"slider" : "number" } export const ComfyWidgets = { "INT:seed": seedWidget, "INT:noise_seed": seedWidget, FLOAT(node, inputName, inputData) { - let widgetType = isSlider(inputData[1]["display_as"]); + let widgetType = isSlider(inputData[1]["display"]); const { val, config } = getNumberDefaults(inputData, 0.5); return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) }; }, INT(node, inputName, inputData) { - let widgetType = isSlider(inputData[1]["display_as"]); + let widgetType = isSlider(inputData[1]["display"]); const { val, config } = getNumberDefaults(inputData, 1); Object.assign(config, { precision: 0 }); return { @@ -276,7 +276,7 @@ export const ComfyWidgets = { this.value = Math.round(v / s) * s; }, config - ), + ), }; }, TOGGLE(node, inputName, inputData) { From eb5191f911d6f474ae3d021343f7335fb96a55e8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 1 Aug 2023 01:14:17 -0400 Subject: [PATCH 15/46] 0.0.0.0 doesn't work on windows. --- main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/main.py b/main.py index 21f76b617..07ebbd701 100644 --- a/main.py +++ b/main.py @@ -160,6 +160,8 @@ if __name__ == "__main__": if args.auto_launch: def startup_server(address, port): import webbrowser + if os.name == 'nt' and address == '0.0.0.0': + address = '127.0.0.1' webbrowser.open(f"http://{address}:{port}") call_on_start = startup_server From d712193885ef69076f79dade517b7e2a6a1a482d Mon Sep 17 00:00:00 2001 From: FuamiCake Date: Tue, 1 Aug 2023 01:23:14 -0500 Subject: [PATCH 16/46] Add LatentBlend node, allowing for blending between two Latent inputs. --- nodes.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/nodes.py b/nodes.py index 097f92308..86aed032e 100644 --- a/nodes.py +++ b/nodes.py @@ -1055,6 +1055,48 @@ class LatentComposite: samples_out["samples"] = s return (samples_out,) +class LatentBlend: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "samples_a": ("LATENT",), + "samples_b": ("LATENT",), + "blend_factor": ("FLOAT", { + "default": 0.5, + "min": 0, + "max": 1, + "step": 0.01 + }), + "blend_mode": (["normal"],), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "blend" + + CATEGORY = "_for_testing" + + def blend(self, samples_a, samples_b, blend_factor:float, blend_mode: str): + + samples_out = samples_a.copy() + samples_a = samples_a["samples"] + samples_b = samples_b["samples"] + + if samples_a.shape != samples_b.shape: + samples_b.permute(0, 3, 1, 2) + samples_b = comfy.utils.common_upscale(samples_b, samples_a.shape[3], samples_a.shape[2], 'bicubic', crop='center') + samples_b.permute(0, 2, 3, 1) + + samples_blended = self.blend_mode(samples_a, samples_b, blend_mode) + samples_blended = samples_a * (1 - blend_factor) + samples_blended * blend_factor + samples_out["samples"] = samples_blended + return (samples_out,) + + def blend_mode(self, img1, img2, mode): + if mode == "normal": + return img2 + else: + raise ValueError(f"Unsupported blend mode: {mode}") + class LatentCrop: @classmethod def INPUT_TYPES(s): @@ -1501,6 +1543,7 @@ NODE_CLASS_MAPPINGS = { "KSamplerAdvanced": KSamplerAdvanced, "SetLatentNoiseMask": SetLatentNoiseMask, "LatentComposite": LatentComposite, + "LatentBlend": LatentBlend, "LatentRotate": LatentRotate, "LatentFlip": LatentFlip, "LatentCrop": LatentCrop, @@ -1572,6 +1615,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "LatentUpscale": "Upscale Latent", "LatentUpscaleBy": "Upscale Latent By", "LatentComposite": "Latent Composite", + "LatentBlend": "Latent Blend", 
"LatentFromBatch" : "Latent From Batch", "RepeatLatentBatch": "Repeat Latent Batch", # Image From 38cfba04309ef96c3e759ef4d34b05b52692c9f3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 1 Aug 2023 03:08:35 -0400 Subject: [PATCH 17/46] Rename toggle to boolean. --- web/scripts/widgets.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index ff5018b7f..c128caa5a 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -279,7 +279,7 @@ export const ComfyWidgets = { ), }; }, - TOGGLE(node, inputName, inputData) { + BOOLEAN(node, inputName, inputData) { let defaultVal = inputData[1]["default"]; return { widget: node.addWidget( @@ -287,6 +287,7 @@ export const ComfyWidgets = { inputName, defaultVal, () => {}, + {"on": inputData[1].label_on, "off": inputData[1].label_off} ) }; }, From 834ab278d2761c452f8e76c83fb62d8f8ce39301 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 1 Aug 2023 03:17:04 -0400 Subject: [PATCH 18/46] Update instructions for mac. --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f62d4289a..b055325ed 100644 --- a/README.md +++ b/README.md @@ -126,10 +126,10 @@ After this you should have everything installed and can proceed to running Comfy You can install ComfyUI in Apple Mac silicon (M1 or M2) with any recent macOS version. -1. Install pytorch. For instructions, read the [Accelerated PyTorch training on Mac](https://developer.apple.com/metal/pytorch/) Apple Developer guide. +1. Install pytorch nightly. For instructions, read the [Accelerated PyTorch training on Mac](https://developer.apple.com/metal/pytorch/) Apple Developer guide (make sure to install the latest pytorch nightly). 1. Follow the [ComfyUI manual installation](#manual-install-windows-linux) instructions for Windows and Linux. 1. Install the ComfyUI [dependencies](#dependencies). If you have another Stable Diffusion UI [you might be able to reuse the dependencies](#i-already-have-another-ui-for-stable-diffusion-installed-do-i-really-have-to-install-all-of-these-dependencies). -1. Launch ComfyUI by running `python main.py`. +1. Launch ComfyUI by running `python main.py --force-fp16`. Note that --force-fp16 will only work if you installed the latest pytorch nightly. > **Note**: Remember to add your models, VAE, LoRAs etc. to the corresponding Comfy folders, as discussed in [ComfyUI manual installation](#manual-install-windows-linux). 
From 7785d073f0171112f0df4f812125a30dc3d6b357 Mon Sep 17 00:00:00 2001 From: Michael Poutre Date: Tue, 1 Aug 2023 12:27:50 -0700 Subject: [PATCH 19/46] chore: Fix typo --- execution.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/execution.py b/execution.py index f19d0b237..619532578 100644 --- a/execution.py +++ b/execution.py @@ -42,9 +42,9 @@ def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_da def map_node_over_list(obj, input_data_all, func, allow_interrupt=False): # check if node wants the lists - intput_is_list = False + input_is_list = False if hasattr(obj, "INPUT_IS_LIST"): - intput_is_list = obj.INPUT_IS_LIST + input_is_list = obj.INPUT_IS_LIST max_len_input = max([len(x) for x in input_data_all.values()]) @@ -56,7 +56,7 @@ def map_node_over_list(obj, input_data_all, func, allow_interrupt=False): return d_new results = [] - if intput_is_list: + if input_is_list: if allow_interrupt: nodes.before_node_execution() results.append(getattr(obj, func)(**input_data_all)) From 90b01635248d09a043ff14d9a1a1ba9789bae7b7 Mon Sep 17 00:00:00 2001 From: Michael Poutre Date: Tue, 1 Aug 2023 12:29:01 -0700 Subject: [PATCH 20/46] fix(execution): Fix support for input-less nodes --- execution.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/execution.py b/execution.py index 619532578..a1a7c75c8 100644 --- a/execution.py +++ b/execution.py @@ -46,7 +46,10 @@ def map_node_over_list(obj, input_data_all, func, allow_interrupt=False): if hasattr(obj, "INPUT_IS_LIST"): input_is_list = obj.INPUT_IS_LIST - max_len_input = max([len(x) for x in input_data_all.values()]) + if len(input_data_all) == 0: + max_len_input = 0 + else: + max_len_input = max([len(x) for x in input_data_all.values()]) # get a slice of inputs, repeat last input when list isn't long enough def slice_dict(d, i): @@ -60,7 +63,11 @@ def map_node_over_list(obj, input_data_all, func, allow_interrupt=False): if allow_interrupt: nodes.before_node_execution() results.append(getattr(obj, func)(**input_data_all)) - else: + elif max_len_input == 0: + if allow_interrupt: + nodes.before_node_execution() + results.append(getattr(obj, func)()) + else: for i in range(max_len_input): if allow_interrupt: nodes.before_node_execution() From e4a3e9e54cb2c153be91b804a86b87ad344249e4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 1 Aug 2023 18:50:06 -0400 Subject: [PATCH 21/46] Add an option in the UI to disable sliders. 
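
For context, the slider is opt-in per input: a node requests it through the "display" entry in its input config (renamed from "display_as" in patch 14), and this new setting forces such inputs back to plain number widgets. A sketch of an input spec that asks for a slider; the parameter name is made up:

    # Sketch: "display": "slider" is the hint isSlider() reads; with the
    # Comfy.DisableSliders setting on, the widget falls back to "number".
    input_spec = {
        "required": {
            "strength": ("FLOAT", {
                "default": 0.5,
                "min": 0.0,
                "max": 1.0,
                "step": 0.01,
                "display": "slider",  # consumed by the frontend only
            }),
        },
    }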
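
An aside on patch 20 above: the zero-input branch is needed because max() over an empty sequence raises ValueError, yet an input-less node still has to execute exactly once. A condensed Python model of the dispatch (names simplified from the real map_node_over_list):

    # Simplified model of the list-mapping dispatch fixed in patch 20.
    def map_over_list(fn, input_data_all):
        if len(input_data_all) == 0:
            return [fn()]  # input-less node: call once with no arguments
        max_len = max(len(v) for v in input_data_all.values())

        # repeat the last value when one list is shorter than the longest
        def slice_at(d, i):
            return {k: v[i if len(v) > i else -1] for k, v in d.items()}

        return [fn(**slice_at(input_data_all, i)) for i in range(max_len)]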
--- web/scripts/ui.js | 7 +++++++ web/scripts/widgets.js | 19 ++++++++++++------- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index d6376582d..5d4e92542 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -542,6 +542,13 @@ export class ComfyUI { defaultValue: "", }); + this.settings.addSetting({ + id: "Comfy.DisableSliders", + name: "Disable sliders.", + type: "boolean", + defaultValue: false, + }); + const fileInput = $el("input", { id: "comfy-file-input", type: "file", diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index c128caa5a..d5a28badf 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -79,8 +79,8 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random return valueControl; }; -function seedWidget(node, inputName, inputData) { - const seed = ComfyWidgets.INT(node, inputName, inputData); +function seedWidget(node, inputName, inputData, app) { + const seed = ComfyWidgets.INT(node, inputName, inputData, app); const seedControl = addValueControlWidget(node, seed.widget, "randomize"); seed.widget.linkedWidgets = [seedControl]; @@ -250,20 +250,25 @@ function addMultilineWidget(node, name, opts, app) { return { minWidth: 400, minHeight: 200, widget }; } -function isSlider(display) { +function isSlider(display, app) { + if (app.ui.settings.getSettingValue("Comfy.DisableSliders")) { + return "number" + } + return (display==="slider") ? "slider" : "number" } export const ComfyWidgets = { "INT:seed": seedWidget, "INT:noise_seed": seedWidget, - FLOAT(node, inputName, inputData) { - let widgetType = isSlider(inputData[1]["display"]); + FLOAT(node, inputName, inputData, app) { + let widgetType = isSlider(inputData[1]["display"], app); const { val, config } = getNumberDefaults(inputData, 0.5); return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) }; }, - INT(node, inputName, inputData) { - let widgetType = isSlider(inputData[1]["display"]); + INT(node, inputName, inputData, app) { + console.log(app); + let widgetType = isSlider(inputData[1]["display"], app); const { val, config } = getNumberDefaults(inputData, 1); Object.assign(config, { precision: 0 }); return { From 05321fd947947d6122fd2a40520fa8da0d376456 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 3 Aug 2023 01:57:00 -0400 Subject: [PATCH 22/46] Add an experimental CTRL-B shortcut to bypass nodes. 
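
LiteGraph already uses mode 0 for "always" and mode 2 for mute; this patch repurposes the otherwise-unused mode 4 for bypass (the "// never" comments are carried over from the mute case), draws bypassed nodes magenta, and drops them from the serialized prompt while rewiring consumers to the first upstream output of a matching type. A sketch of the effect on the API prompt, with node ids and classes invented for illustration:

    # Before: node "2" sits between "4" and "1" on a LATENT link.
    prompt = {
        "1": {"class_type": "VAEDecode",
              "inputs": {"samples": ["2", 0], "vae": ["3", 2]}},
        "2": {"class_type": "LatentBlend",
              "inputs": {"samples1": ["4", 0], "samples2": ["5", 0],
                         "blend_factor": 0.5}},
    }
    # After bypassing "2" with CTRL-B: it is omitted and the LATENT input
    # is wired straight through to the matching upstream slot.
    prompt_bypassed = {
        "1": {"class_type": "VAEDecode",
              "inputs": {"samples": ["4", 0], "vae": ["3", 2]}},
    }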
--- web/scripts/app.js | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 5d54edd76..8b273f626 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -768,6 +768,19 @@ export class ComfyApp { } block_default = true; } + + if (e.keyCode == 66 && e.ctrlKey) { + if (this.selected_nodes) { + for (var i in this.selected_nodes) { + if (this.selected_nodes[i].mode === 4) { // never + this.selected_nodes[i].mode = 0; // always + } else { + this.selected_nodes[i].mode = 4; // never + } + } + } + block_default = true; + } } this.graph.change(); @@ -914,14 +927,21 @@ export class ComfyApp { const origDrawNode = LGraphCanvas.prototype.drawNode; LGraphCanvas.prototype.drawNode = function (node, ctx) { var editor_alpha = this.editor_alpha; + var old_color = node.bgcolor; if (node.mode === 2) { // never this.editor_alpha = 0.4; } + if (node.mode === 4) { // never + node.bgcolor = "#FF00FF"; + this.editor_alpha = 0.2; + } + const res = origDrawNode.apply(this, arguments); this.editor_alpha = editor_alpha; + node.bgcolor = old_color; return res; }; @@ -1308,7 +1328,7 @@ export class ComfyApp { continue; } - if (node.mode === 2) { + if (node.mode === 2 || node.mode === 4) { // Don't serialize muted nodes continue; } @@ -1331,6 +1351,26 @@ export class ComfyApp { let parent = node.getInputNode(i); if (parent) { let link = node.getInputLink(i); + while (parent.mode === 4) { + let found = false; + if (link) { + let all_inputs = [link.origin_slot].concat(parent.inputs) + for (let parent_input in all_inputs) { + if (parent.inputs[parent_input].type === node.inputs[i].type) { + link = parent.getInputLink(parent_input); + if (link) { + parent = parent.getInputNode(parent_input); + } + found = true; + break; + } + } + } + if (!found) { + break; + } + } + while (parent && parent.isVirtualNode) { link = parent.getInputLink(link.origin_slot); if (link) { From 19fbab6ce3afa27dd5d015fbf4ad6a7df5c6c6c4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 3 Aug 2023 02:36:02 -0400 Subject: [PATCH 23/46] Fix reroute nodes not working with bypassed nodes. 
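
The fix folds the reroute (virtual node) hop and the bypass hop into a single upstream walk, so arbitrary chains of the two resolve. A rough Python model of the loop; attribute names are illustrative, the real code lives in app.js:

    # Rough model of the combined traversal: keep hopping upstream while
    # the parent is a reroute or a bypassed (mode 4) node.
    def resolve_link(parent, link, wanted_type):
        while parent is not None and (parent.is_virtual or parent.mode == 4):
            if parent.is_virtual:  # reroute: follow its single input
                link = parent.get_input_link(link.origin_slot)
            else:  # bypassed: first input of the matching type
                link = next((parent.get_input_link(i)
                             for i, inp in enumerate(parent.inputs or [])
                             if inp.type == wanted_type), None)
            if link is None:
                return None  # chain ends with nothing attached
            parent = link.origin_node
        return link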
--- web/scripts/app.js | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 8b273f626..75dc0fd69 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1351,9 +1351,17 @@ export class ComfyApp { let parent = node.getInputNode(i); if (parent) { let link = node.getInputLink(i); - while (parent.mode === 4) { + while (parent.mode === 4 || parent.isVirtualNode) { let found = false; - if (link) { + if (parent.isVirtualNode) { + link = parent.getInputLink(link.origin_slot); + if (link) { + parent = parent.getInputNode(link.origin_slot); + if (parent) { + found = true; + } + } + } else if (link && parent.mode === 4) { let all_inputs = [link.origin_slot].concat(parent.inputs) for (let parent_input in all_inputs) { if (parent.inputs[parent_input].type === node.inputs[i].type) { @@ -1366,20 +1374,13 @@ export class ComfyApp { } } } + + if (!found) { break; } } - while (parent && parent.isVirtualNode) { - link = parent.getInputLink(link.origin_slot); - if (link) { - parent = parent.getInputNode(link.origin_slot); - } else { - parent = null; - } - } - if (link) { inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)]; } From 077617e8c963b9dad5ac6b4efa92c61c9af3e166 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 3 Aug 2023 02:57:40 -0400 Subject: [PATCH 24/46] Fix bypassed nodes with no inputs. --- web/scripts/app.js | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 75dc0fd69..8c9e7a27f 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1362,20 +1362,23 @@ export class ComfyApp { } } } else if (link && parent.mode === 4) { - let all_inputs = [link.origin_slot].concat(parent.inputs) - for (let parent_input in all_inputs) { - if (parent.inputs[parent_input].type === node.inputs[i].type) { - link = parent.getInputLink(parent_input); - if (link) { - parent = parent.getInputNode(parent_input); + let all_inputs = [link.origin_slot]; + if (parent.inputs) { + all_inputs = all_inputs.concat(Object.keys(parent.inputs)) + for (let parent_input in all_inputs) { + parent_input = all_inputs[parent_input]; + if (parent.inputs[parent_input].type === node.inputs[i].type) { + link = parent.getInputLink(parent_input); + if (link) { + parent = parent.getInputNode(parent_input); + } + found = true; + break; } - found = true; - break; } } } - if (!found) { break; } From d1347544bcb66bd87618164af13d9d300aefa200 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 3 Aug 2023 16:51:37 -0400 Subject: [PATCH 25/46] Make context menu filter import from relative path. 
--- web/extensions/core/contextMenuFilter.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index 662d87e74..e0e8854b3 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -1,4 +1,4 @@ -import {app} from "/scripts/app.js"; +import {app} from "../../scripts/app.js"; // Adds filtering to combo context menus From 9534f0f8a5a026654492da378f84d2cdc589ed01 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Fri, 4 Aug 2023 09:24:52 +0900 Subject: [PATCH 26/46] allows convert to widget for boolean type (#1063) --- web/extensions/core/widgetInputs.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 7600ce87b..d9eaf8a0c 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -2,7 +2,7 @@ import { ComfyWidgets, addValueControlWidget } from "../../scripts/widgets.js"; import { app } from "../../scripts/app.js"; const CONVERTED_TYPE = "converted-widget"; -const VALID_TYPES = ["STRING", "combo", "number"]; +const VALID_TYPES = ["STRING", "combo", "number", "BOOLEAN"]; function isConvertableWidget(widget, config) { return VALID_TYPES.includes(widget.type) || VALID_TYPES.includes(config[0]); From c99d8002f8a479e9505830347cee37f7d603394c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 3 Aug 2023 20:27:50 -0400 Subject: [PATCH 27/46] Make sure the pooled output stays at the EOS token with added embeddings. --- comfy/sd1_clip.py | 18 +++++++++++++----- web/scripts/widgets.js | 1 - 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index d504bf77d..feca41880 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -91,13 +91,15 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): def set_up_textual_embeddings(self, tokens, current_embeds): out_tokens = [] - next_new_token = token_dict_size = current_embeds.weight.shape[0] + next_new_token = token_dict_size = current_embeds.weight.shape[0] - 1 embedding_weights = [] for x in tokens: tokens_temp = [] for y in x: if isinstance(y, int): + if y == token_dict_size: #EOS token + y = -1 tokens_temp += [y] else: if y.shape[0] == current_embeds.weight.shape[1]: @@ -110,15 +112,21 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): tokens_temp += [self.empty_tokens[0][-1]] out_tokens += [tokens_temp] + n = token_dict_size if len(embedding_weights) > 0: - new_embedding = torch.nn.Embedding(next_new_token, current_embeds.weight.shape[1], device=current_embeds.weight.device, dtype=current_embeds.weight.dtype) - new_embedding.weight[:token_dict_size] = current_embeds.weight[:] - n = token_dict_size + new_embedding = torch.nn.Embedding(next_new_token + 1, current_embeds.weight.shape[1], device=current_embeds.weight.device, dtype=current_embeds.weight.dtype) + new_embedding.weight[:token_dict_size] = current_embeds.weight[:-1] for x in embedding_weights: new_embedding.weight[n] = x n += 1 + new_embedding.weight[n] = current_embeds.weight[-1] #EOS embedding self.transformer.set_input_embeddings(new_embedding) - return out_tokens + + processed_tokens = [] + for x in out_tokens: + processed_tokens += [list(map(lambda a: n if a == -1 else a, x))] #The EOS token should always be the largest one + + return processed_tokens def forward(self, tokens): backup_embeds = 
self.transformer.get_input_embeddings() diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index d5a28badf..d4a15ba84 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -267,7 +267,6 @@ export const ComfyWidgets = { return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) }; }, INT(node, inputName, inputData, app) { - console.log(app); let widgetType = isSlider(inputData[1]["display"], app); const { val, config } = getNumberDefaults(inputData, 1); Object.assign(config, { precision: 0 }); From fa962e86c1cdc3bb9dd57ac028fba0e577346983 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 4 Aug 2023 02:51:28 -0400 Subject: [PATCH 28/46] Make LatentBlend more consistent with other nodes. --- nodes.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/nodes.py b/nodes.py index 86aed032e..92baffe30 100644 --- a/nodes.py +++ b/nodes.py @@ -1059,15 +1059,14 @@ class LatentBlend: @classmethod def INPUT_TYPES(s): return {"required": { - "samples_a": ("LATENT",), - "samples_b": ("LATENT",), + "samples1": ("LATENT",), + "samples2": ("LATENT",), "blend_factor": ("FLOAT", { "default": 0.5, "min": 0, "max": 1, "step": 0.01 }), - "blend_mode": (["normal"],), }} RETURN_TYPES = ("LATENT",) @@ -1075,19 +1074,19 @@ class LatentBlend: CATEGORY = "_for_testing" - def blend(self, samples_a, samples_b, blend_factor:float, blend_mode: str): + def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"): - samples_out = samples_a.copy() - samples_a = samples_a["samples"] - samples_b = samples_b["samples"] + samples_out = samples1.copy() + samples1 = samples1["samples"] + samples2 = samples2["samples"] - if samples_a.shape != samples_b.shape: - samples_b.permute(0, 3, 1, 2) - samples_b = comfy.utils.common_upscale(samples_b, samples_a.shape[3], samples_a.shape[2], 'bicubic', crop='center') - samples_b.permute(0, 2, 3, 1) + if samples1.shape != samples2.shape: + samples2.permute(0, 3, 1, 2) + samples2 = comfy.utils.common_upscale(samples2, samples1.shape[3], samples1.shape[2], 'bicubic', crop='center') + samples2.permute(0, 2, 3, 1) - samples_blended = self.blend_mode(samples_a, samples_b, blend_mode) - samples_blended = samples_a * (1 - blend_factor) + samples_blended * blend_factor + samples_blended = self.blend_mode(samples1, samples2, blend_mode) + samples_blended = samples1 * blend_factor + samples_blended * (1 - blend_factor) samples_out["samples"] = samples_blended return (samples_out,) From d7638c47fc36ea7366d970f56c005216e4793e82 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 4 Aug 2023 03:22:47 -0400 Subject: [PATCH 29/46] Fix ui inconsistency. 
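
Two asides on the preceding patches. First, patch 27: HF CLIP takes the pooled output from the hidden state at tokens.argmax(-1), i.e. the position of the numerically largest token id, which is normally the EOS token. Injected embedding ids used to be appended past EOS and would steal that argmax; keeping EOS as the largest id restores the pooling position. A small illustration with SD1 CLIP ids, where 49407 is EOS:

    import torch

    # 49406 = BOS, 49407 = EOS, 49408 = an injected embedding token id.
    tokens = torch.tensor([[49406, 320, 49408, 49407, 0]])
    print(tokens.argmax(dim=-1))  # tensor([2]) -> pooled at the wrong spot
    # After patch 27 the embedding gets a smaller id and EOS stays largest,
    # so argmax returns 3 and pooling happens at the EOS position again.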
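
Second, patch 28: besides the input renames, the weighting flipped, so blend_factor now weights the first sample rather than the second. In torch terms:

    import torch

    # Patch 16 computed a*(1-f) + b*f; patch 28 settles on a*f + b*(1-f)
    # for the "normal" mode, so blend_factor weights samples1.
    def blend_normal(samples1, samples2, blend_factor):
        return samples1 * blend_factor + samples2 * (1 - blend_factor)

    a = torch.zeros(1, 4, 8, 8)
    b = torch.ones(1, 4, 8, 8)
    print(blend_normal(a, b, 0.75).mean())  # tensor(0.2500): mostly samples1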
--- web/scripts/ui.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 5d4e92542..03a4035b1 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -480,7 +480,7 @@ class ComfyList { hide() { this.element.style.display = "none"; - this.button.textContent = "See " + this.#text; + this.button.textContent = "View " + this.#text; } toggle() { From 0bbd9dd4d9cc3749f4ec0903f33c5ffb9d7aecfd Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 4 Aug 2023 08:29:25 +0100 Subject: [PATCH 30/46] add system info to stats endpoint --- server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server.py b/server.py index f61b11a97..fab33be3e 100644 --- a/server.py +++ b/server.py @@ -345,6 +345,11 @@ class PromptServer(): vram_total, torch_vram_total = comfy.model_management.get_total_memory(device, torch_total_too=True) vram_free, torch_vram_free = comfy.model_management.get_free_memory(device, torch_free_too=True) system_stats = { + "system": { + "os": os.name, + "python_version": sys.version, + "embedded_python": os.path.split(os.path.split(sys.executable)[0])[1] == "python_embeded" + }, "devices": [ { "name": device_name, From 43ae9fe7216198ff044ea33ac6f6d21a9ca9c2af Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 4 Aug 2023 08:29:51 +0100 Subject: [PATCH 31/46] add system stats function --- web/scripts/api.js | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/web/scripts/api.js b/web/scripts/api.js index d3d15e47e..b1d245d73 100644 --- a/web/scripts/api.js +++ b/web/scripts/api.js @@ -264,6 +264,15 @@ class ComfyApi extends EventTarget { } } + /** + * Gets system & device stats + * @returns System stats such as python version, OS, per device info + */ + async getSystemStats() { + const res = await this.fetchApi("/system_stats"); + return await res.json(); + } + /** * Sends a POST request to the API * @param {*} type The endpoint to post to From b2ea0cbd5c5dfbb734f375acd042bd49cabe84ec Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 4 Aug 2023 08:30:01 +0100 Subject: [PATCH 32/46] add logging --- web/scripts/app.js | 6 + web/scripts/logging.js | 367 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 373 insertions(+) create mode 100644 web/scripts/logging.js diff --git a/web/scripts/app.js b/web/scripts/app.js index 8c9e7a27f..11903a2d4 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1,3 +1,4 @@ +import { ComfyLogging } from "./logging.js"; import { ComfyWidgets } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; @@ -31,6 +32,7 @@ export class ComfyApp { constructor() { this.ui = new ComfyUI(this); + this.logging = new ComfyLogging(this); /** * List of extensions that are registered with the app @@ -1023,6 +1025,7 @@ export class ComfyApp { */ async #loadExtensions() { const extensions = await api.getExtensions(); + this.logging.addEntry("Comfy.App", "debug", { Extensions: extensions }); for (const ext of extensions) { try { await import(api.apiURL(ext)); @@ -1306,6 +1309,9 @@ export class ComfyApp { (t) => `
<li>${t}</li>
  • ` ).join("")}Nodes that have failed to load will show as red on the graph.` ); + this.logging.addEntry("Comfy.App", "warn", { + MissingNodes: nodes, + }); } } diff --git a/web/scripts/logging.js b/web/scripts/logging.js new file mode 100644 index 000000000..c73462e1e --- /dev/null +++ b/web/scripts/logging.js @@ -0,0 +1,367 @@ +import { $el, ComfyDialog } from "./ui.js"; +import { api } from "./api.js"; + +$el("style", { + textContent: ` + .comfy-logging-logs { + display: grid; + color: var(--fg-color); + white-space: pre-wrap; + } + .comfy-logging-log { + display: contents; + } + .comfy-logging-title { + background: var(--tr-even-bg-color); + font-weight: bold; + margin-bottom: 5px; + text-align: center; + } + .comfy-logging-log div { + background: var(--row-bg); + padding: 5px; + } + `, + parent: document.body, +}); + +// Stringify function supporting max depth and removal of circular references +// https://stackoverflow.com/a/57193345 +function stringify(val, depth, replacer, space, onGetObjID) { + depth = isNaN(+depth) ? 1 : depth; + var recursMap = new WeakMap(); + function _build(val, depth, o, a, r) { + // (JSON.stringify() has it's own rules, which we respect here by using it for property iteration) + return !val || typeof val != "object" + ? val + : ((r = recursMap.has(val)), + recursMap.set(val, true), + (a = Array.isArray(val)), + r + ? (o = (onGetObjID && onGetObjID(val)) || null) + : JSON.stringify(val, function (k, v) { + if (a || depth > 0) { + if (replacer) v = replacer(k, v); + if (!k) return (a = Array.isArray(v)), (val = v); + !o && (o = a ? [] : {}); + o[k] = _build(v, a ? depth : depth - 1); + } + }), + o === void 0 ? (a ? [] : {}) : o); + } + return JSON.stringify(_build(val, depth), null, space); +} + +const jsonReplacer = (k, v, ui) => { + if (v instanceof Array && v.length === 1) { + v = v[0]; + } + if (v instanceof Date) { + v = v.toISOString(); + if (ui) { + v = v.split("T")[1]; + } + } + if (v instanceof Error) { + let err = ""; + if (v.name) err += v.name + "\n"; + if (v.message) err += v.message + "\n"; + if (v.stack) err += v.stack + "\n"; + if (!err) { + err = v.toString(); + } + v = err; + } + return v; +}; + +const fileInput = $el("input", { + type: "file", + accept: ".json", + style: { display: "none" }, + parent: document.body, +}); + +class ComfyLoggingDialog extends ComfyDialog { + constructor(logging) { + super(); + this.logging = logging; + } + + clear() { + this.logging.clear(); + this.show(); + } + + export() { + const blob = new Blob([stringify([...this.logging.entries], 20, jsonReplacer, "\t")], { + type: "application/json", + }); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: `comfyui-logs-${Date.now()}.json`, + style: { display: "none" }, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + } + + import() { + fileInput.onchange = () => { + const reader = new FileReader(); + reader.onload = () => { + fileInput.remove(); + try { + const obj = JSON.parse(reader.result); + if (obj instanceof Array) { + this.show(obj); + } else { + throw new Error("Invalid file selected."); + } + } catch (error) { + alert("Unable to load logs: " + error.message); + } + }; + reader.readAsText(fileInput.files[0]); + }; + fileInput.click(); + } + + createButtons() { + return [ + $el("button", { + type: "button", + textContent: "Clear", + onclick: () => this.clear(), + }), + $el("button", { + type: "button", + textContent: "Export logs...", + 
onclick: () => this.export(), + }), + $el("button", { + type: "button", + textContent: "View exported logs...", + onclick: () => this.import(), + }), + ...super.createButtons(), + ]; + } + + getTypeColor(type) { + switch (type) { + case "error": + return "red"; + case "warn": + return "orange"; + case "debug": + return "dodgerblue"; + } + } + + show(entries) { + if (!entries) entries = this.logging.entries; + this.element.style.width = "100%"; + const cols = { + source: "Source", + type: "Type", + timestamp: "Timestamp", + message: "Message", + }; + const keys = Object.keys(cols); + const headers = Object.values(cols).map((title) => + $el("div.comfy-logging-title", { + textContent: title, + }) + ); + const rows = entries.map((entry, i) => { + return $el( + "div.comfy-logging-log", + { + $: (el) => el.style.setProperty("--row-bg", `var(--tr-${i % 2 ? "even" : "odd"}-bg-color)`), + }, + keys.map((key) => { + let v = entry[key]; + let color; + if (key === "type") { + color = this.getTypeColor(v); + } else { + v = jsonReplacer(key, v, true); + + if (typeof v === "object") { + v = stringify(v, 5, jsonReplacer, " "); + } + } + + return $el("div", { + style: { + color, + }, + textContent: v, + }); + }) + ); + }); + + const grid = $el( + "div.comfy-logging-logs", + { + style: { + gridTemplateColumns: `repeat(${headers.length}, 1fr)`, + }, + }, + [...headers, ...rows] + ); + const els = [grid]; + if (!this.logging.enabled) { + els.unshift( + $el("h3", { + style: { textAlign: "center" }, + textContent: "Logging is disabled", + }) + ); + } + super.show($el("div", els)); + } +} + +export class ComfyLogging { + /** + * @type Array<{ source: string, type: string, timestamp: Date, message: any }> + */ + entries = []; + + #enabled; + #console = {}; + + get enabled() { + return this.#enabled; + } + + set enabled(value) { + if (value === this.#enabled) return; + if (value) { + this.patchConsole(); + } else { + this.unpatchConsole(); + } + this.#enabled = value; + } + + constructor(app) { + this.app = app; + + this.dialog = new ComfyLoggingDialog(this); + this.addSetting(); + this.catchUnhandled(); + this.addInitData(); + } + + addSetting() { + const settingId = "Comfy.Logging.Enabled"; + const htmlSettingId = settingId.replaceAll(".", "-"); + const setting = this.app.ui.settings.addSetting({ + id: settingId, + name: settingId, + defaultValue: true, + type: (name, setter, value) => { + return $el("tr", [ + $el("td", [ + $el("label", { + textContent: "Logging", + for: htmlSettingId, + }), + ]), + $el("td", [ + $el("input", { + id: htmlSettingId, + type: "checkbox", + checked: value, + onchange: (event) => { + setter((this.enabled = event.target.checked)); + }, + }), + $el("button", { + textContent: "View Logs", + onclick: () => { + this.app.ui.settings.element.close(); + this.dialog.show(); + }, + style: { + fontSize: "14px", + display: "block", + marginTop: "5px", + }, + }), + ]), + ]); + }, + }); + this.enabled = setting.value; + } + + patchConsole() { + // Capture common console outputs + const self = this; + for (const type of ["log", "warn", "error", "debug"]) { + const orig = console[type]; + this.#console[type] = orig; + console[type] = function () { + orig.apply(console, arguments); + self.addEntry("console", type, ...arguments); + }; + } + } + + unpatchConsole() { + // Restore original console functions + for (const type of Object.keys(this.#console)) { + console[type] = this.#console[type]; + } + this.#console = {}; + } + + catchUnhandled() { + // Capture uncaught errors + 
window.addEventListener("error", (e) => { + this.addEntry("window", "error", e.error ?? "Unknown error"); + return false; + }); + + window.addEventListener("unhandledrejection", (e) => { + this.addEntry("unhandledrejection", "error", e.reason ?? "Unknown error"); + }); + } + + clear() { + this.entries = []; + } + + addEntry(source, type, ...args) { + if (this.enabled) { + this.entries.push({ + source, + type, + timestamp: new Date(), + message: args, + }); + } + } + + log(source, ...args) { + this.addEntry(source, "log", ...args); + } + + async addInitData() { + if (!this.enabled) return; + const source = "ComfyUI.Logging"; + this.addEntry(source, "debug", { UserAgent: navigator.userAgent }); + const systemStats = await api.getSystemStats(); + this.addEntry(source, "debug", systemStats); + } +} From 3d614dde499d7c7fcb29696ce4999967b51757c2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 4 Aug 2023 03:47:45 -0400 Subject: [PATCH 33/46] Fix bug with reroutes and bypass. --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 8c9e7a27f..c4d593f2c 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1356,7 +1356,7 @@ export class ComfyApp { if (parent.isVirtualNode) { link = parent.getInputLink(link.origin_slot); if (link) { - parent = parent.getInputNode(link.origin_slot); + parent = parent.getInputNode(link.target_slot); if (parent) { found = true; } From 1ce0d8ad68e15c58a0e9793eb873f0238f741f4c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 4 Aug 2023 12:08:45 -0400 Subject: [PATCH 34/46] Add CMP 30HX card to the nvidia_16_series list. --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 0ffca06da..4dd15b41c 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -535,7 +535,7 @@ def should_use_fp16(device=None, model_params=0): return False #FP16 is just broken on these cards - nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450"] + nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX"] for x in nvidia_16_series: if x in props.name: return False From 8918f1085ca18b7a4d90a4120eec2e8df9062979 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 4 Aug 2023 21:26:11 +0100 Subject: [PATCH 35/46] Add setting to change link render mode Add support for combo settings --- web/extensions/core/linkRenderMode.js | 25 ++++++++++++++++++++++++ web/scripts/ui.js | 28 ++++++++++++++++++++++++++- 2 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 web/extensions/core/linkRenderMode.js diff --git a/web/extensions/core/linkRenderMode.js b/web/extensions/core/linkRenderMode.js new file mode 100644 index 000000000..8b8d4e01f --- /dev/null +++ b/web/extensions/core/linkRenderMode.js @@ -0,0 +1,25 @@ +import { app } from "/scripts/app.js"; + +const id = "Comfy.LinkRenderMode"; +const ext = { + name: id, + async setup(app) { + app.ui.settings.addSetting({ + id, + name: "Link Render Mode", + defaultValue: 2, + type: "combo", + options: LiteGraph.LINK_RENDER_MODES.map((m, i) => ({ + value: i, + text: m, + selected: i == app.canvas.links_render_mode, + })), + onChange(value) { + app.canvas.links_render_mode = +value; + app.graph.setDirtyCanvas(true); + }, + }); + }, +}; + +app.registerExtension(ext); diff --git a/web/scripts/ui.js b/web/scripts/ui.js 
index 03a4035b1..86e2a1c41 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -234,7 +234,7 @@ class ComfySettingsDialog extends ComfyDialog { localStorage[settingId] = JSON.stringify(value); } - addSetting({id, name, type, defaultValue, onChange, attrs = {}, tooltip = "",}) { + addSetting({id, name, type, defaultValue, onChange, attrs = {}, tooltip = "", options = undefined}) { if (!id) { throw new Error("Settings must have an ID"); } @@ -347,6 +347,32 @@ class ComfySettingsDialog extends ComfyDialog { ]), ]); break; + case "combo": + element = $el("tr", [ + labelCell, + $el("td", [ + $el( + "select", + { + oninput: (e) => { + setter(e.target.value); + }, + }, + (typeof options === "function" ? options(value) : options || []).map((opt) => { + if (typeof opt === "string") { + opt = { text: opt }; + } + const v = opt.value ?? opt.text; + return $el("option", { + value: v, + textContent: opt.text, + selected: value + "" === v + "", + }); + }) + ), + ]), + ]); + break; case "text": default: if (type !== "text") { From 5a90d3cea57d1507227a6324ae9efb5e77410cea Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 4 Aug 2023 21:44:37 -0400 Subject: [PATCH 36/46] GeForce MX110 + MX130 are maxwell. --- cuda_malloc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cuda_malloc.py b/cuda_malloc.py index a808b2071..e586d3eff 100644 --- a/cuda_malloc.py +++ b/cuda_malloc.py @@ -40,7 +40,8 @@ def cuda_malloc_supported(): blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M", "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620", "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000", - "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000"} + "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000", + "GeForce MX110", "GeForce MX130"} try: names = get_gpu_names() From c5d7593ccfb4dd3a97175e01b9fa883086f5d8b4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 5 Aug 2023 01:40:24 -0400 Subject: [PATCH 37/46] Support loras in diffusers format. 
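
For reference, the same projection layer arrives under two naming schemes depending on the exporter. An illustration of the key pair the loader now matches, with block indices invented; the diffusers layout carries no mid/LoCon weight, and its to_out keys drop the trailing ".0":

    # Illustrative keys for one attention projection (indices made up):
    kohya_up     = "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q.lora_up.weight"
    diffusers_up = "unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.up.weight"
    # Both now map onto the same model weight via model_lora_keys_unet().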
--- comfy/sd.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 922cbf21e..7511bb501 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -70,13 +70,22 @@ def load_lora(lora, to_load): alpha = lora[alpha_name].item() loaded_keys.add(alpha_name) - A_name = "{}.lora_up.weight".format(x) - B_name = "{}.lora_down.weight".format(x) - mid_name = "{}.lora_mid.weight".format(x) + regular_lora = "{}.lora_up.weight".format(x) + diffusers_lora = "{}_lora.up.weight".format(x) + A_name = None - if A_name in lora.keys(): + if regular_lora in lora.keys(): + A_name = regular_lora + B_name = "{}.lora_down.weight".format(x) + mid_name = "{}.lora_mid.weight".format(x) + elif diffusers_lora in lora.keys(): + A_name = diffusers_lora + B_name = "{}_lora.down.weight".format(x) + mid_name = None + + if A_name is not None: mid = None - if mid_name in lora.keys(): + if mid_name is not None and mid_name in lora.keys(): mid = lora[mid_name] loaded_keys.add(mid_name) patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid) @@ -202,6 +211,11 @@ def model_lora_keys_unet(model, key_map={}): if k.endswith(".weight"): key_lora = k[:-len(".weight")].replace(".", "_") key_map["lora_unet_{}".format(key_lora)] = "diffusion_model.{}".format(diffusers_keys[k]) + + diffusers_lora_key = "unet.{}".format(k[:-len(".weight")].replace(".to_", ".processor.to_")) + if diffusers_lora_key.endswith(".to_out.0"): + diffusers_lora_key = diffusers_lora_key[:-2] + key_map[diffusers_lora_key] = "diffusion_model.{}".format(diffusers_keys[k]) return key_map def set_attr(obj, attr, value): From 32e115b81817e4f3512e0391da8b0b8a9754de10 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 5 Aug 2023 11:00:18 +0100 Subject: [PATCH 38/46] prevent crashing if the widget cant be found --- web/extensions/core/contextMenuFilter.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index e0e8854b3..0b2256e8c 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -27,10 +27,10 @@ const ext = { const clickedComboValue = currentNode.widgets .filter(w => w.type === "combo" && w.options.values.length === values.length) .find(w => w.options.values.every((v, i) => v === values[i])) - .value; + ?.value; - let selectedIndex = values.findIndex(v => v === clickedComboValue); - let selectedItem = displayedItems?.[selectedIndex]; + let selectedIndex = clickedComboValue ? values.findIndex(v => v === clickedComboValue) : 0; + let selectedItem = displayedItems[selectedIndex]; updateSelected(); // Apply highlighting to the selected item From b948b2cf41cd8b4b1c925a6ef9e689615f86e8ad Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 5 Aug 2023 11:04:04 +0100 Subject: [PATCH 39/46] handle value missing --- web/extensions/core/contextMenuFilter.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index 0b2256e8c..152cd7043 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -30,6 +30,9 @@ const ext = { ?.value; let selectedIndex = clickedComboValue ? 
values.findIndex(v => v === clickedComboValue) : 0; + if (selectedIndex < 0) { + selectedIndex = 0; + } let selectedItem = displayedItems[selectedIndex]; updateSelected(); From 435577457a8576386910c62662eddb8a82efddb0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 5 Aug 2023 17:18:45 -0400 Subject: [PATCH 40/46] Add a way to use cloudflared tunnel to the colab notebook. --- notebooks/comfyui_colab.ipynb | 53 ++++++++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb index 1bb90f7d0..84f2cf403 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -159,13 +159,64 @@ "\n" ] }, + { + "cell_type": "markdown", + "metadata": { + "id": "kkkkkkkkkkkkkkk" + }, + "source": [ + "### Run ComfyUI with cloudflared (Recommended Way)\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jjjjjjjjjjjjjj" + }, + "outputs": [], + "source": [ + "!wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb\n", + "!dpkg -i cloudflared-linux-amd64.deb\n" + "\n", + "import subprocess\n", + "import threading\n", + "import time\n", + "import socket\n", + "import urllib.request\n", + "\n", + "def iframe_thread(port):\n", + " while True:\n", + " time.sleep(0.5)\n", + " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n", + " result = sock.connect_ex(('127.0.0.1', port))\n", + " if result == 0:\n", + " break\n", + " sock.close()\n", + " print(\"\\nComfyUI finished loading, trying to launch cloudflared (if it gets stuck here cloudflared is having issues)\\n\")\n", + "\n", + " p = subprocess.Popen([\"cloudflared\", \"tunnel\", \"--url\", \"http://127.0.0.1:{}\".format(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", + " for line in p.stderr:\n", + " l = line.decode()\n", + " if \"trycloudflare.com \" in l:\n", + " print(\"This is the URL to access ComfyUI:\", l[l.find(\"http\"):], end='')\n", + " #print(l, end='')\n", + "\n", + "\n", + "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n", + "\n", + "!python main.py --dont-print-server" + ] + }, { "cell_type": "markdown", "metadata": { "id": "kkkkkkkkkkkkkk" }, "source": [ - "### Run ComfyUI with localtunnel (Recommended Way)\n", + "### Run ComfyUI with localtunnel\n", "\n", "\n" ] From c9ef919e29cc2454419eb3454e334b7a4c7814a6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 5 Aug 2023 17:20:35 -0400 Subject: [PATCH 41/46] Formatting issue. --- notebooks/comfyui_colab.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb index 84f2cf403..b1c487101 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -179,7 +179,7 @@ "outputs": [], "source": [ "!wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb\n", - "!dpkg -i cloudflared-linux-amd64.deb\n" + "!dpkg -i cloudflared-linux-amd64.deb\n", "\n", "import subprocess\n", "import threading\n", From fc71cf656e1f26e6577c0a211b7460fc078b0c39 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 5 Aug 2023 21:53:25 -0400 Subject: [PATCH 42/46] Add some 800M gpus to cuda malloc blacklist. 
--- cuda_malloc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cuda_malloc.py b/cuda_malloc.py index e586d3eff..d033529cc 100644 --- a/cuda_malloc.py +++ b/cuda_malloc.py @@ -41,7 +41,7 @@ def cuda_malloc_supported(): "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620", "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000", "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000", - "GeForce MX110", "GeForce MX130"} + "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M"} try: names = get_gpu_names() From 0cb14a33f607da8e93c4ab02047170deb6a33dae Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 5 Aug 2023 21:53:57 -0400 Subject: [PATCH 43/46] Fix issue with logging missing nodes. --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 61324406d..40156abc3 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1310,7 +1310,7 @@ export class ComfyApp { ).join("")}Nodes that have failed to load will show as red on the graph.` ); this.logging.addEntry("Comfy.App", "warn", { - MissingNodes: nodes, + MissingNodes: missingNodeTypes, }); } } From d8e58f0a7ea914377c56f9b32e449ef4c65da572 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 6 Aug 2023 14:08:59 -0400 Subject: [PATCH 44/46] Detect hint_channels from controlnet. --- comfy/sd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd.py b/comfy/sd.py index 7511bb501..2996a938b 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -878,7 +878,7 @@ def load_controlnet(ckpt_path, model=None): use_fp16 = model_management.should_use_fp16() controlnet_config = model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config controlnet_config.pop("out_channels") - controlnet_config["hint_channels"] = 3 + controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1] control_model = cldm.ControlNet(**controlnet_config) if pth: From 0ce8a540cefa0b61705134b1eb4f1d67c6f3f4ba Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 6 Aug 2023 14:36:43 -0400 Subject: [PATCH 45/46] Update litegraph to latest. --- web/lib/litegraph.core.js | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 2a33bd4a7..2682ff309 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -9835,7 +9835,11 @@ LGraphNode.prototype.executeAction = function(action) ctx.textAlign = "center"; ctx.fillStyle = text_color; ctx.fillText( - w.label || w.name + " " + Number(w.value).toFixed(3), + w.label || w.name + " " + Number(w.value).toFixed( + w.options.precision != null + ? w.options.precision + : 3 + ), widget_width * 0.5, y + H * 0.7 ); @@ -13835,7 +13839,7 @@ LGraphNode.prototype.executeAction = function(action) if (!disabled) { element.addEventListener("click", inner_onclick); } - if (options.autoopen) { + if (!disabled && options.autoopen) { LiteGraph.pointerListenerAdd(element,"enter",inner_over); } From 1f0f4cc0bdb5483d0ef87bc4edad4b99a3b97d77 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 7 Aug 2023 02:22:26 -0400 Subject: [PATCH 46/46] Add argument to disable auto launching the browser. 
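
The ordering of the two post-parse overrides is the point: --windows-standalone-build force-enables auto-launch, so the disable flag has to be applied after it to win. A runnable sketch of that precedence:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--auto-launch", action="store_true")
    parser.add_argument("--disable-auto-launch", action="store_true")
    parser.add_argument("--windows-standalone-build", action="store_true")
    args = parser.parse_args(["--windows-standalone-build",
                              "--disable-auto-launch"])

    if args.windows_standalone_build:
        args.auto_launch = True
    if args.disable_auto_launch:  # applied last, so it always wins
        args.auto_launch = False

    print(args.auto_launch)  # False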
--- comfy/cli_args.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 83d8cd287..81bbc4796 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -39,6 +39,7 @@ parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORI parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.") +parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.") parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") cm_group = parser.add_mutually_exclusive_group() cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).") @@ -90,3 +91,6 @@ args = parser.parse_args() if args.windows_standalone_build: args.auto_launch = True + +if args.disable_auto_launch: + args.auto_launch = False