Merge remote-tracking branch 'upstream/master' into addBatchIndex

Author: flyingshutter
Date: 2023-04-22 14:33:05 +02:00
Commit: aff322fef2
37 changed files with 3257 additions and 283 deletions


@ -0,0 +1,30 @@
name: "Windows Release cu118 dependencies 2"
on:
workflow_dispatch:
# push:
# branches:
# - master
jobs:
build_dependencies:
runs-on: windows-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10.9'
- shell: bash
run: |
python -m pip wheel --no-cache-dir torch torchvision torchaudio xformers==0.0.19.dev516 --extra-index-url https://download.pytorch.org/whl/cu118 -r requirements.txt pygit2 -w ./temp_wheel_dir
python -m pip install --no-cache-dir ./temp_wheel_dir/*
echo installed basic
ls -lah temp_wheel_dir
mv temp_wheel_dir cu118_python_deps
tar cf cu118_python_deps.tar cu118_python_deps
- uses: actions/cache/save@v3
with:
path: cu118_python_deps.tar
key: ${{ runner.os }}-build-cu118

.gitignore (vendored, 1 line changed)

@ -8,3 +8,4 @@ temp/
custom_nodes/
!custom_nodes/example_node.py.example
extra_model_paths.yaml
/.vs


@ -25,6 +25,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
- [ControlNet and T2I-Adapter](https://comfyanonymous.github.io/ComfyUI_examples/controlnet/)
- [Upscale Models (ESRGAN, ESRGAN variants, SwinIR, Swin2SR, etc...)](https://comfyanonymous.github.io/ComfyUI_examples/upscale_models/)
- [unCLIP Models](https://comfyanonymous.github.io/ComfyUI_examples/unclip/)
- [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/)
- Starts up very fast.
- Works fully offline: will never download anything.
- [Config file](extra_model_paths.yaml.example) to set the search paths for models.
@ -32,9 +33,28 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
## Shortcuts
- **Ctrl + A** select all nodes
- **Ctrl + M** mute/unmute selected nodes
- **Delete** or **Backspace** delete selected nodes
| Keybind | Explanation |
| - | - |
| Ctrl + Enter | Queue up current graph for generation |
| Ctrl + Shift + Enter | Queue up current graph as first for generation |
| Ctrl + S | Save workflow |
| Ctrl + O | Load workflow |
| Ctrl + A | Select all nodes |
| Ctrl + M | Mute/unmute selected nodes |
| Delete/Backspace | Delete selected nodes |
| Ctrl + Delete/Backspace | Delete the current graph |
| Space | Move the canvas around when held and moving the cursor |
| Ctrl/Shift + Click | Add clicked node to selection |
| Ctrl + C/Ctrl + V | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes) |
| Ctrl + C/Ctrl + Shift + V| Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) |
| Shift + Drag | Move multiple selected nodes at the same time |
| Ctrl + D | Load default graph |
| Q | Toggle visibility of the queue |
| H | Toggle visibility of history |
| R | Refresh graph |
Ctrl can also be replaced with Cmd for macOS users
# Installing
@ -64,7 +84,7 @@ Put your VAE in: models/vae
At the time of writing this, pytorch has issues with python versions higher than 3.10, so make sure your python/pip version is 3.10.
### AMD (Linux only)
### AMD GPUs (Linux only)
AMD users can install rocm and pytorch with pip if you don't have it already installed. This is the command to install the stable version:
```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```
@ -92,6 +112,11 @@ Install the dependencies by opening your terminal inside the ComfyUI folder and:
After this you should have everything installed and can proceed to running ComfyUI.
### Others:
[Intel Arc](https://github.com/comfyanonymous/ComfyUI/discussions/476)
Mac/MPS: There is basic support in the code, but until someone writes install instructions you are on your own.
### I already have another UI for Stable Diffusion installed, do I really have to install all of these dependencies?

comfy/gligen.py (new file, 343 lines)

@ -0,0 +1,343 @@
import math # used by GatedSelfAttentionDense2.forward (math.sqrt)
import torch
from torch import nn, einsum
from ldm.modules.attention import CrossAttention
from inspect import isfunction
def exists(val):
return val is not None
def uniq(arr):
return {el: True for el in arr}.keys()
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
# feedforward
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * torch.nn.functional.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
project_in = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU()
) if not glu else GEGLU(dim, inner_dim)
self.net = nn.Sequential(
project_in,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out)
)
def forward(self, x):
return self.net(x)
class GatedCrossAttentionDense(nn.Module):
def __init__(self, query_dim, context_dim, n_heads, d_head):
super().__init__()
self.attn = CrossAttention(
query_dim=query_dim,
context_dim=context_dim,
heads=n_heads,
dim_head=d_head)
self.ff = FeedForward(query_dim, glu=True)
self.norm1 = nn.LayerNorm(query_dim)
self.norm2 = nn.LayerNorm(query_dim)
self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)))
self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)))
# this can be useful: we can externally change the magnitude of tanh(alpha),
# for example, when it is set to 0, the entire model is the same as the
# original one
self.scale = 1
def forward(self, x, objs):
x = x + self.scale * \
torch.tanh(self.alpha_attn) * self.attn(self.norm1(x), objs, objs)
x = x + self.scale * \
torch.tanh(self.alpha_dense) * self.ff(self.norm2(x))
return x
class GatedSelfAttentionDense(nn.Module):
def __init__(self, query_dim, context_dim, n_heads, d_head):
super().__init__()
# we need a linear projection since we concatenate the visual feature and
# object feature
self.linear = nn.Linear(context_dim, query_dim)
self.attn = CrossAttention(
query_dim=query_dim,
context_dim=query_dim,
heads=n_heads,
dim_head=d_head)
self.ff = FeedForward(query_dim, glu=True)
self.norm1 = nn.LayerNorm(query_dim)
self.norm2 = nn.LayerNorm(query_dim)
self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)))
self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)))
# this can be useful: we can externally change the magnitude of tanh(alpha),
# for example, when it is set to 0, the entire model is the same as the
# original one
self.scale = 1
def forward(self, x, objs):
N_visual = x.shape[1]
objs = self.linear(objs)
x = x + self.scale * torch.tanh(self.alpha_attn) * self.attn(
self.norm1(torch.cat([x, objs], dim=1)))[:, 0:N_visual, :]
x = x + self.scale * \
torch.tanh(self.alpha_dense) * self.ff(self.norm2(x))
return x
class GatedSelfAttentionDense2(nn.Module):
def __init__(self, query_dim, context_dim, n_heads, d_head):
super().__init__()
# we need a linear projection since we concatenate the visual feature and
# object feature
self.linear = nn.Linear(context_dim, query_dim)
self.attn = CrossAttention(
query_dim=query_dim, context_dim=query_dim, dim_head=d_head)
self.ff = FeedForward(query_dim, glu=True)
self.norm1 = nn.LayerNorm(query_dim)
self.norm2 = nn.LayerNorm(query_dim)
self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)))
self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)))
# this can be useful: we can externally change the magnitude of tanh(alpha),
# for example, when it is set to 0, the entire model is the same as the
# original one
self.scale = 1
def forward(self, x, objs):
B, N_visual, _ = x.shape
B, N_ground, _ = objs.shape
objs = self.linear(objs)
# sanity check
size_v = math.sqrt(N_visual)
size_g = math.sqrt(N_ground)
assert int(size_v) == size_v, "Visual tokens must be square rootable"
assert int(size_g) == size_g, "Grounding tokens must be square rootable"
size_v = int(size_v)
size_g = int(size_g)
# select grounding token and resize it to visual token size as residual
out = self.attn(self.norm1(torch.cat([x, objs], dim=1)))[
:, N_visual:, :]
out = out.permute(0, 2, 1).reshape(B, -1, size_g, size_g)
out = torch.nn.functional.interpolate(
out, (size_v, size_v), mode='bicubic')
residual = out.reshape(B, -1, N_visual).permute(0, 2, 1)
# add residual to visual feature
x = x + self.scale * torch.tanh(self.alpha_attn) * residual
x = x + self.scale * \
torch.tanh(self.alpha_dense) * self.ff(self.norm2(x))
return x
class FourierEmbedder():
def __init__(self, num_freqs=64, temperature=100):
self.num_freqs = num_freqs
self.temperature = temperature
self.freq_bands = temperature ** (torch.arange(num_freqs) / num_freqs)
@torch.no_grad()
def __call__(self, x, cat_dim=-1):
"x: arbitrary shape of tensor. dim: cat dim"
out = []
for freq in self.freq_bands:
out.append(torch.sin(freq * x))
out.append(torch.cos(freq * x))
return torch.cat(out, cat_dim)
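For orientation, a minimal sketch of what the embedder produces (shapes are assumptions, not from this diff): with num_freqs=8 each scalar expands to 8 sin and 8 cos values, which is where PositionNet's position_dim = fourier_freqs * 2 * 4 comes from.

```python
import torch

embedder = FourierEmbedder(num_freqs=8)
boxes = torch.rand(2, 30, 4)   # B=2, N=30 boxes, normalized xyxy coordinates
emb = embedder(boxes)
print(emb.shape)               # torch.Size([2, 30, 64]) == (B, N, 4 * 2 * 8)
```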
class PositionNet(nn.Module):
def __init__(self, in_dim, out_dim, fourier_freqs=8):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs)
self.position_dim = fourier_freqs * 2 * 4 # 2 is sin&cos, 4 is xyxy
self.linears = nn.Sequential(
nn.Linear(self.in_dim + self.position_dim, 512),
nn.SiLU(),
nn.Linear(512, 512),
nn.SiLU(),
nn.Linear(512, out_dim),
)
self.null_positive_feature = torch.nn.Parameter(
torch.zeros([self.in_dim]))
self.null_position_feature = torch.nn.Parameter(
torch.zeros([self.position_dim]))
def forward(self, boxes, masks, positive_embeddings):
B, N, _ = boxes.shape
masks = masks.unsqueeze(-1)
# embedding position (it may include padding as placeholder)
xyxy_embedding = self.fourier_embedder(boxes) # B*N*4 --> B*N*C
# learnable null embedding
positive_null = self.null_positive_feature.view(1, 1, -1)
xyxy_null = self.null_position_feature.view(1, 1, -1)
# replace padding with learnable null embedding
positive_embeddings = positive_embeddings * \
masks + (1 - masks) * positive_null
xyxy_embedding = xyxy_embedding * masks + (1 - masks) * xyxy_null
objs = self.linears(
torch.cat([positive_embeddings, xyxy_embedding], dim=-1))
assert objs.shape == torch.Size([B, N, self.out_dim])
return objs
class Gligen(nn.Module):
def __init__(self, modules, position_net, key_dim):
super().__init__()
self.module_list = nn.ModuleList(modules)
self.position_net = position_net
self.key_dim = key_dim
self.max_objs = 30
def _set_position(self, boxes, masks, positive_embeddings):
objs = self.position_net(boxes, masks, positive_embeddings)
def func(key, x):
module = self.module_list[key]
return module(x, objs)
return func
def set_position(self, latent_image_shape, position_params, device):
batch, c, h, w = latent_image_shape
masks = torch.zeros([self.max_objs], device="cpu")
boxes = []
positive_embeddings = []
for p in position_params:
x1 = (p[4]) / w
y1 = (p[3]) / h
x2 = (p[4] + p[2]) / w
y2 = (p[3] + p[1]) / h
masks[len(boxes)] = 1.0
boxes += [torch.tensor((x1, y1, x2, y2)).unsqueeze(0)]
positive_embeddings += [p[0]]
append_boxes = []
append_conds = []
if len(boxes) < self.max_objs:
append_boxes = [torch.zeros(
[self.max_objs - len(boxes), 4], device="cpu")]
append_conds = [torch.zeros(
[self.max_objs - len(boxes), self.key_dim], device="cpu")]
box_out = torch.cat(
boxes + append_boxes).unsqueeze(0).repeat(batch, 1, 1)
masks = masks.unsqueeze(0).repeat(batch, 1)
conds = torch.cat(positive_embeddings +
append_conds).unsqueeze(0).repeat(batch, 1, 1)
return self._set_position(
box_out.to(device),
masks.to(device),
conds.to(device))
def set_empty(self, latent_image_shape, device):
batch, c, h, w = latent_image_shape
masks = torch.zeros([self.max_objs], device="cpu").repeat(batch, 1)
box_out = torch.zeros([self.max_objs, 4],
device="cpu").repeat(batch, 1, 1)
conds = torch.zeros([self.max_objs, self.key_dim],
device="cpu").repeat(batch, 1, 1)
return self._set_position(
box_out.to(device),
masks.to(device),
conds.to(device))
def cleanup(self):
pass
def get_models(self):
return [self]
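As I read it, `_set_position` captures the projected grounding tokens in a closure; the `middle_patch` hook (see the attention hunk below) later calls that closure with the index of the transformer block currently executing, which selects the matching gated self-attention module. A hedged sketch, with shapes and variables as placeholders:

```python
# gligen_model: a loaded Gligen instance; position_params as built by
# GLIGENTextBoxApply further down in this diff.
patch = gligen_model.set_position((1, 4, 64, 64), position_params, device)
x = patch(current_index, x)   # runs module_list[current_index](x, objs)
```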
def load_gligen(sd):
sd_k = sd.keys()
output_list = []
key_dim = 768
for a in ["input_blocks", "middle_block", "output_blocks"]:
for b in range(20):
k_temp = filter(lambda k: "{}.{}.".format(a, b)
in k and ".fuser." in k, sd_k)
k_temp = map(lambda k: (k, k.split(".fuser.")[-1]), k_temp)
n_sd = {}
for k in k_temp:
n_sd[k[1]] = sd[k[0]]
if len(n_sd) > 0:
query_dim = n_sd["linear.weight"].shape[0]
key_dim = n_sd["linear.weight"].shape[1]
if key_dim == 768: # SD1.x
n_heads = 8
d_head = query_dim // n_heads
else:
d_head = 64
n_heads = query_dim // d_head
gated = GatedSelfAttentionDense(
query_dim, key_dim, n_heads, d_head)
gated.load_state_dict(n_sd, strict=False)
output_list.append(gated)
if "position_net.null_positive_feature" in sd_k:
in_dim = sd["position_net.null_positive_feature"].shape[0]
out_dim = sd["position_net.linears.4.weight"].shape[0]
class WeightsLoader(torch.nn.Module):
pass
w = WeightsLoader()
w.position_net = PositionNet(in_dim, out_dim)
w.load_state_dict(sd, strict=False)
gligen = Gligen(output_list, w.position_net, key_dim)
return gligen


@ -9,7 +9,7 @@ from typing import Optional, Any
from ldm.modules.diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention
import model_management
from comfy import model_management
from . import tomesd
@ -510,6 +510,14 @@ class BasicTransformerBlock(nn.Module):
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
def _forward(self, x, context=None, transformer_options={}):
current_index = None
if "current_index" in transformer_options:
current_index = transformer_options["current_index"]
if "patches" in transformer_options:
transformer_patches = transformer_options["patches"]
else:
transformer_patches = {}
n = self.norm1(x)
if "tomesd" in transformer_options:
m, u = tomesd.get_functions(x, transformer_options["tomesd"]["ratio"], transformer_options["original_shape"])
@ -518,11 +526,19 @@ class BasicTransformerBlock(nn.Module):
n = self.attn1(n, context=context if self.disable_self_attn else None)
x += n
if "middle_patch" in transformer_patches:
patch = transformer_patches["middle_patch"]
for p in patch:
x = p(current_index, x)
n = self.norm2(x)
n = self.attn2(n, context=context)
x += n
x = self.ff(self.norm3(x)) + x
if current_index is not None:
transformer_options["current_index"] += 1
return x
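Putting this hunk together, a sketch of the patch protocol it introduces; the hook name and call signature are taken from this diff, while the patch body is a placeholder:

```python
def my_middle_patch(current_index, x):
    # called between attn1 and attn2 of every BasicTransformerBlock;
    # current_index identifies which block is executing
    return x

transformer_options = {
    "patches": {"middle_patch": [my_middle_patch]},
    "current_index": 0,
}
```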


@ -7,7 +7,7 @@ from einops import rearrange
from typing import Optional, Any
from ldm.modules.attention import MemoryEfficientCrossAttention
import model_management
from comfy import model_management
if model_management.xformers_enabled_vae():
import xformers


@ -782,6 +782,8 @@ class UNetModel(nn.Module):
:return: an [N x C x ...] Tensor of outputs.
"""
transformer_options["original_shape"] = list(x.shape)
transformer_options["current_index"] = 0
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"


@ -24,7 +24,7 @@ except ImportError:
from torch import Tensor
from typing import List
import model_management
from comfy import model_management
def dynamic_slice(
x: Tensor,


@ -176,7 +176,7 @@ def load_model_gpu(model):
model_accelerated = True
return current_loaded_model
def load_controlnet_gpu(models):
def load_controlnet_gpu(control_models):
global current_gpu_controlnets
global vram_state
if vram_state == VRAMState.CPU:
@ -186,6 +186,10 @@ def load_controlnet_gpu(models):
#don't load controlnets like this if low vram because they will be loaded right before running and unloaded right after
return
models = []
for m in control_models:
models += m.get_models()
for m in current_gpu_controlnets:
if m not in models:
m.cpu()
@ -307,6 +311,15 @@ def should_use_fp16():
return True
def soft_empty_cache():
global xpu_available
if xpu_available:
torch.xpu.empty_cache()
elif torch.cuda.is_available():
if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
#TODO: might be cleaner to put this somewhere else
import threading


@ -3,27 +3,10 @@ from .k_diffusion import external as k_diffusion_external
from .extra_samplers import uni_pc
import torch
import contextlib
import model_management
from comfy import model_management
from .ldm.models.diffusion.ddim import DDIMSampler
from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
class CFGDenoiser(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.inner_model = model
def forward(self, x, sigma, uncond, cond, cond_scale):
if len(uncond[0]) == len(cond[0]) and x.shape[0] * x.shape[2] * x.shape[3] < (96 * 96): #TODO check memory instead
x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigma] * 2)
cond_in = torch.cat([uncond, cond])
uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
else:
cond = self.inner_model(x, sigma, cond=cond)
uncond = self.inner_model(x, sigma, cond=uncond)
return uncond + (cond - uncond) * cond_scale
#The main sampling function shared by all the samplers
#Returns predicted noise
def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}):
@ -36,8 +19,8 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
strength = cond[1]['strength']
adm_cond = None
if 'adm' in cond[1]:
adm_cond = cond[1]['adm']
if 'adm_encoded' in cond[1]:
adm_cond = cond[1]['adm_encoded']
input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]]
mult = torch.ones_like(input_x) * strength
@ -70,7 +53,21 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
control = None
if 'control' in cond[1]:
control = cond[1]['control']
return (input_x, mult, conditionning, area, control)
patches = None
if 'gligen' in cond[1]:
gligen = cond[1]['gligen']
patches = {}
gligen_type = gligen[0]
gligen_model = gligen[1]
if gligen_type == "position":
gligen_patch = gligen_model.set_position(input_x.shape, gligen[2], input_x.device)
else:
gligen_patch = gligen_model.set_empty(input_x.shape, input_x.device)
patches['middle_patch'] = [gligen_patch]
return (input_x, mult, conditionning, area, control, patches)
def cond_equal_size(c1, c2):
if c1 is c2:
@ -91,12 +88,21 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
def can_concat_cond(c1, c2):
if c1[0].shape != c2[0].shape:
return False
#control
if (c1[4] is None) != (c2[4] is None):
return False
if c1[4] is not None:
if c1[4] is not c2[4]:
return False
#patches
if (c1[5] is None) != (c2[5] is None):
return False
if (c1[5] is not None):
if c1[5] is not c2[5]:
return False
return cond_equal_size(c1[2], c2[2])
def cond_cat(c_list):
@ -166,6 +172,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
cond_or_uncond = []
area = []
control = None
patches = None
for x in to_batch:
o = to_run.pop(x)
p = o[0]
@ -175,6 +182,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
area += [p[3]]
cond_or_uncond += [o[1]]
control = p[4]
patches = p[5]
batch_chunks = len(cond_or_uncond)
input_x = torch.cat(input_x)
@ -184,8 +192,14 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
if control is not None:
c['control'] = control.get_control(input_x, timestep_, c['c_crossattn'], len(cond_or_uncond))
transformer_options = {}
if 'transformer_options' in model_options:
c['transformer_options'] = model_options['transformer_options']
transformer_options = model_options['transformer_options'].copy()
if patches is not None:
transformer_options["patches"] = patches
c['transformer_options'] = transformer_options
output = model_function(input_x, timestep_, cond=c).chunk(batch_chunks)
del input_x
@ -211,7 +225,10 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
max_total_area = model_management.maximum_batch_area()
cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
return uncond + (cond - uncond) * cond_scale
if "sampler_cfg_function" in model_options:
return model_options["sampler_cfg_function"](cond, uncond, cond_scale)
else:
return uncond + (cond - uncond) * cond_scale
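The new `sampler_cfg_function` hook replaces the CFG combination step; a hedged sketch using the signature at the call site above (registration via `set_model_sampler_cfg_function` is added in the sd.py hunk below):

```python
def my_cfg(cond, uncond, cond_scale):
    # placeholder body: identical to the default branch above
    return uncond + (cond - uncond) * cond_scale

model.set_model_sampler_cfg_function(my_cfg)   # model: a ModelPatcher instance
```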
class CompVisVDenoiser(k_diffusion_external.DiscreteVDDPMDenoiser):
@ -306,8 +323,7 @@ def create_cond_with_same_area_if_none(conds, c):
n = c[1].copy()
conds += [[smallest[0], n]]
def apply_control_net_to_equal_area(conds, uncond):
def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func):
cond_cnets = []
cond_other = []
uncond_cnets = []
@ -315,15 +331,15 @@ def apply_control_net_to_equal_area(conds, uncond):
for t in range(len(conds)):
x = conds[t]
if 'area' not in x[1]:
if 'control' in x[1] and x[1]['control'] is not None:
cond_cnets.append(x[1]['control'])
if name in x[1] and x[1][name] is not None:
cond_cnets.append(x[1][name])
else:
cond_other.append((x, t))
for t in range(len(uncond)):
x = uncond[t]
if 'area' not in x[1]:
if 'control' in x[1] and x[1]['control'] is not None:
uncond_cnets.append(x[1]['control'])
if name in x[1] and x[1][name] is not None:
uncond_cnets.append(x[1][name])
else:
uncond_other.append((x, t))
@ -333,15 +349,16 @@ def apply_control_net_to_equal_area(conds, uncond):
for x in range(len(cond_cnets)):
temp = uncond_other[x % len(uncond_other)]
o = temp[0]
if 'control' in o[1] and o[1]['control'] is not None:
if name in o[1] and o[1][name] is not None:
n = o[1].copy()
n['control'] = cond_cnets[x]
n[name] = uncond_fill_func(cond_cnets, x)
uncond += [[o[0], n]]
else:
n = o[1].copy()
n['control'] = cond_cnets[x]
n[name] = uncond_fill_func(cond_cnets, x)
uncond[temp[1]] = [o[0], n]
def encode_adm(noise_augmentor, conds, batch_size, device):
for t in range(len(conds)):
x = conds[t]
@ -371,10 +388,11 @@ def encode_adm(noise_augmentor, conds, batch_size, device):
else:
adm_out = torch.zeros((1, noise_augmentor.time_embed.dim * 2), device=device)
x[1] = x[1].copy()
x[1]["adm"] = torch.cat([adm_out] * batch_size)
x[1]["adm_encoded"] = torch.cat([adm_out] * batch_size)
return conds
class KSampler:
SCHEDULERS = ["karras", "normal", "simple", "ddim_uniform"]
SAMPLERS = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
@ -463,7 +481,8 @@ class KSampler:
for c in negative:
create_cond_with_same_area_if_none(positive, c)
apply_control_net_to_equal_area(positive, negative)
apply_empty_x_to_equal_area(positive, negative, 'control', lambda cond_cnets, x: cond_cnets[x])
apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x])
if self.model.model.diffusion_model.dtype == torch.float16:
precision_scope = torch.autocast


@ -4,7 +4,7 @@ import copy
import sd1_clip
import sd2_clip
import model_management
from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
@ -13,6 +13,7 @@ from .t2i_adapter import adapter
from . import utils
from . import clip_vision
from . import gligen
def load_model_weights(model, sd, verbose=False, load_state_dict_to=[]):
m, u = model.load_state_dict(sd, strict=False)
@ -250,6 +251,9 @@ class ModelPatcher:
def set_model_tomesd(self, ratio):
self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}
def set_model_sampler_cfg_function(self, sampler_cfg_function):
self.model_options["sampler_cfg_function"] = sampler_cfg_function
def model_dtype(self):
return self.model.diffusion_model.dtype
@ -372,10 +376,12 @@ class CLIP:
def clip_layer(self, layer_idx):
self.layer_idx = layer_idx
def encode(self, text):
def tokenize(self, text, return_word_ids=False):
return self.tokenizer.tokenize_with_weights(text, return_word_ids)
def encode_from_tokens(self, tokens, return_pooled=False):
if self.layer_idx is not None:
self.cond_stage_model.clip_layer(self.layer_idx)
tokens = self.tokenizer.tokenize_with_weights(text)
try:
self.patcher.patch_model()
cond = self.cond_stage_model.encode_token_weights(tokens)
@ -383,8 +389,16 @@ class CLIP:
except Exception as e:
self.patcher.unpatch_model()
raise e
if return_pooled:
eos_token_index = max(range(len(tokens[0])), key=tokens[0].__getitem__)
pooled = cond[:, eos_token_index]
return cond, pooled
return cond
def encode(self, text):
tokens = self.tokenize(text)
return self.encode_from_tokens(tokens)
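`encode` is now a thin wrapper over the two-step API; a sketch of direct use with a CLIP instance, mirroring the call in the GLIGENTextBoxApply node later in this diff:

```python
tokens = clip.tokenize("a photo of a cat")
cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
# pooled is the hidden state at the end-of-sequence token position
```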
class VAE:
def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
if config is None:
@ -555,10 +569,10 @@ class ControlNet:
c.strength = self.strength
return c
def get_control_models(self):
def get_models(self):
out = []
if self.previous_controlnet is not None:
out += self.previous_controlnet.get_control_models()
out += self.previous_controlnet.get_models()
out.append(self.control_model)
return out
@ -728,10 +742,10 @@ class T2IAdapter:
del self.cond_hint
self.cond_hint = None
def get_control_models(self):
def get_models(self):
out = []
if self.previous_controlnet is not None:
out += self.previous_controlnet.get_control_models()
out += self.previous_controlnet.get_models()
return out
def load_t2i_adapter(t2i_data):
@ -778,6 +792,13 @@ def load_clip(ckpt_path, embedding_directory=None):
clip.load_from_state_dict(clip_data)
return clip
def load_gligen(ckpt_path):
data = utils.load_torch_file(ckpt_path)
model = gligen.load_gligen(data)
if model_management.should_use_fp16():
model = model.half()
return model
def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
with open(config_path, 'r') as stream:
config = yaml.safe_load(stream)


@ -2,6 +2,8 @@ import os
from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig
import torch
import traceback
import zipfile
class ClipTokenWeightEncoder:
def encode_token_weights(self, token_weight_pairs):
@ -170,6 +172,26 @@ def unescape_important(text):
text = text.replace("\0\2", "(")
return text
def safe_load_embed_zip(embed_path):
with zipfile.ZipFile(embed_path) as myzip:
names = list(filter(lambda a: "data/" in a, myzip.namelist()))
names.reverse()
for n in names:
with myzip.open(n) as myfile:
data = myfile.read()
number = len(data) // 4
length_embed = 1024 #sd2.x
if number < 768:
continue
if number % 768 == 0:
length_embed = 768 #sd1.x
num_embeds = number // length_embed
embed = torch.frombuffer(data, dtype=torch.float)
out = embed.reshape((num_embeds, length_embed)).clone()
del embed
return out
def load_embed(embedding_name, embedding_directory):
if isinstance(embedding_directory, str):
embedding_directory = [embedding_directory]
@ -194,19 +216,33 @@ def load_embed(embedding_name, embedding_directory):
embed_path = valid_file
if embed_path.lower().endswith(".safetensors"):
import safetensors.torch
embed = safetensors.torch.load_file(embed_path, device="cpu")
else:
if 'weights_only' in torch.load.__code__.co_varnames:
embed = torch.load(embed_path, weights_only=True, map_location="cpu")
embed_out = None
try:
if embed_path.lower().endswith(".safetensors"):
import safetensors.torch
embed = safetensors.torch.load_file(embed_path, device="cpu")
else:
embed = torch.load(embed_path, map_location="cpu")
if 'string_to_param' in embed:
values = embed['string_to_param'].values()
else:
values = embed.values()
return next(iter(values))
if 'weights_only' in torch.load.__code__.co_varnames:
try:
embed = torch.load(embed_path, weights_only=True, map_location="cpu")
except:
embed_out = safe_load_embed_zip(embed_path)
else:
embed = torch.load(embed_path, map_location="cpu")
except Exception as e:
print(traceback.format_exc())
print()
print("error loading embedding, skipping loading:", embedding_name)
return None
if embed_out is None:
if 'string_to_param' in embed:
values = embed['string_to_param'].values()
else:
values = embed.values()
embed_out = next(iter(values))
return embed_out
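A hedged usage sketch of the hardened loader (file and directory names are hypothetical):

```python
embed = load_embed("my_style.pt", ["models/embeddings"])
if embed is not None:
    print(embed.shape)   # e.g. (num_vectors, 768) for SD1.x embeddings
```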
class SD1Tokenizer:
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None):
@ -224,60 +260,97 @@ class SD1Tokenizer:
self.inv_vocab = {v: k for k, v in vocab.items()}
self.embedding_directory = embedding_directory
self.max_word_length = 8
self.embedding_identifier = "embedding:"
def _try_get_embedding(self, embedding_name:str):
'''
Takes a potential embedding name and tries to retrieve it.
Returns a tuple of (embedding, leftover string); the embedding can be None.
'''
embed = load_embed(embedding_name, self.embedding_directory)
if embed is None:
stripped = embedding_name.strip(',')
if len(stripped) < len(embedding_name):
embed = load_embed(stripped, self.embedding_directory)
return (embed, embedding_name[len(stripped):])
return (embed, "")
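The leftover value handles punctuation glued to an embedding name inside a prompt; a sketch with a hypothetical SD1Tokenizer instance and embedding name:

```python
embed, leftover = tokenizer._try_get_embedding("foo,")
# "foo," is not found, so the trailing comma is stripped and "foo" retried;
# on success leftover == "," and is re-parsed as ordinary prompt text
```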
def tokenize_with_weights(self, text:str, return_word_ids=False):
'''
Takes a prompt and converts it to a list of (token, weight, word id) elements.
Tokens can be both integer tokens and pre-computed CLIP tensors.
Word id values are unique per word and embedding, where id 0 is reserved for non-word tokens.
Returned list has the dimensions NxM where M is the input size of CLIP
'''
if self.pad_with_end:
pad_token = self.end_token
else:
pad_token = 0
def tokenize_with_weights(self, text):
text = escape_important(text)
parsed_weights = token_weights(text, 1.0)
#tokenize words
tokens = []
for t in parsed_weights:
to_tokenize = unescape_important(t[0]).replace("\n", " ").split(' ')
while len(to_tokenize) > 0:
word = to_tokenize.pop(0)
temp_tokens = []
embedding_identifier = "embedding:"
if word.startswith(embedding_identifier) and self.embedding_directory is not None:
embedding_name = word[len(embedding_identifier):].strip('\n')
embed = load_embed(embedding_name, self.embedding_directory)
for weighted_segment, weight in parsed_weights:
to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
to_tokenize = [x for x in to_tokenize if x != ""]
for word in to_tokenize:
#if we find an embedding, deal with the embedding
if word.startswith(self.embedding_identifier) and self.embedding_directory is not None:
embedding_name = word[len(self.embedding_identifier):].strip('\n')
embed, leftover = self._try_get_embedding(embedding_name)
if embed is None:
stripped = embedding_name.strip(',')
if len(stripped) < len(embedding_name):
embed = load_embed(stripped, self.embedding_directory)
if embed is not None:
to_tokenize.insert(0, embedding_name[len(stripped):])
if embed is not None:
if len(embed.shape) == 1:
temp_tokens += [(embed, t[1])]
else:
for x in range(embed.shape[0]):
temp_tokens += [(embed[x], t[1])]
print(f"warning, embedding:{embedding_name} does not exist, ignoring")
else:
print("warning, embedding:{} does not exist, ignoring".format(embedding_name))
elif len(word) > 0:
tt = self.tokenizer(word)["input_ids"][1:-1]
for x in tt:
temp_tokens += [(x, t[1])]
tokens_left = self.max_tokens_per_section - (len(tokens) % self.max_tokens_per_section)
if len(embed.shape) == 1:
tokens.append([(embed, weight)])
else:
tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
#if we accidentally have leftover text, continue parsing using leftover, else move on to next word
if leftover != "":
word = leftover
else:
continue
#parse word
tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])
#try not to split words in different sections
if tokens_left < len(temp_tokens) and len(temp_tokens) < (self.max_word_length):
for x in range(tokens_left):
tokens += [(self.end_token, 1.0)]
tokens += temp_tokens
#reshape token array to CLIP input size
batched_tokens = []
batch = [(self.start_token, 1.0, 0)]
batched_tokens.append(batch)
for i, t_group in enumerate(tokens):
#determine if we're going to try and keep the tokens in a single batch
is_large = len(t_group) >= self.max_word_length
out_tokens = []
for x in range(0, len(tokens), self.max_tokens_per_section):
o_token = [(self.start_token, 1.0)] + tokens[x:min(self.max_tokens_per_section + x, len(tokens))]
o_token += [(self.end_token, 1.0)]
if self.pad_with_end:
o_token +=[(self.end_token, 1.0)] * (self.max_length - len(o_token))
else:
o_token +=[(0, 1.0)] * (self.max_length - len(o_token))
while len(t_group) > 0:
if len(t_group) + len(batch) > self.max_length - 1:
remaining_length = self.max_length - len(batch) - 1
#break word in two and add end token
if is_large:
batch.extend([(t,w,i+1) for t,w in t_group[:remaining_length]])
batch.append((self.end_token, 1.0, 0))
t_group = t_group[remaining_length:]
#add end token and pad
else:
batch.append((self.end_token, 1.0, 0))
batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
#start new batch
batch = [(self.start_token, 1.0, 0)]
batched_tokens.append(batch)
else:
batch.extend([(t,w,i+1) for t,w in t_group])
t_group = []
out_tokens += [o_token]
#fill last batch
batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))
if not return_word_ids:
batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]
return batched_tokens
return out_tokens
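A hedged sketch of the new return format, with a hypothetical SD1Tokenizer instance (token ids elided for brevity):

```python
batches = tokenizer.tokenize_with_weights("a (cat:1.2)", return_word_ids=True)
# a list of max_length-long batches of (token, weight, word_id) triples, e.g.
# [[(start, 1.0, 0), (tok_a, 1.0, 1), (tok_cat, 1.2, 2), (end, 1.0, 0),
#   (pad, 1.0, 0), ...]]
```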
def untokenize(self, token_weight_pair):
return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))


@ -1,4 +1,4 @@
import sd1_clip
from comfy import sd1_clip
import torch
import os

comfy_extras/nodes_mask.py (new file, 262 lines)

@ -0,0 +1,262 @@
import torch
from nodes import MAX_RESOLUTION
class LatentCompositeMasked:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"destination": ("LATENT",),
"source": ("LATENT",),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
},
"optional": {
"mask": ("MASK",),
}
}
RETURN_TYPES = ("LATENT",)
FUNCTION = "composite"
CATEGORY = "latent"
def composite(self, destination, source, x, y, mask = None):
output = destination.copy()
destination = destination["samples"].clone()
source = source["samples"]
x = max(-source.shape[3] * 8, min(x, destination.shape[3] * 8))
y = max(-source.shape[2] * 8, min(y, destination.shape[2] * 8))
left, top = (x // 8, y // 8)
right, bottom = (left + source.shape[3], top + source.shape[2],)
if mask is None:
mask = torch.ones_like(source)
else:
mask = mask.clone()
mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear")
mask = mask.repeat((source.shape[0], source.shape[1], 1, 1))
# calculate the bounds of the source that will be overlapping the destination
# this prevents the source trying to overwrite latent pixels that are out of bounds
# of the destination
visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
mask = mask[:, :, :visible_height, :visible_width]
inverse_mask = torch.ones_like(mask) - mask
source_portion = mask * source[:, :, :visible_height, :visible_width]
destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
destination[:, :, top:bottom, left:right] = source_portion + destination_portion
output["samples"] = destination
return (output,)
class MaskToImage:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"mask": ("MASK",),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("IMAGE",)
FUNCTION = "mask_to_image"
def mask_to_image(self, mask):
result = mask[None, :, :, None].expand(-1, -1, -1, 3)
return (result,)
class ImageToMask:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("IMAGE",),
"channel": (["red", "green", "blue"],),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "image_to_mask"
def image_to_mask(self, image, channel):
channels = ["red", "green", "blue"]
mask = image[0, :, :, channels.index(channel)]
return (mask,)
class SolidMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
"width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
"height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "solid"
def solid(self, value, width, height):
out = torch.full((height, width), value, dtype=torch.float32, device="cpu")
return (out,)
class InvertMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"mask": ("MASK",),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "invert"
def invert(self, mask):
out = 1.0 - mask
return (out,)
class CropMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"mask": ("MASK",),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
"height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "crop"
def crop(self, mask, x, y, width, height):
out = mask[y:y + height, x:x + width]
return (out,)
class MaskComposite:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"destination": ("MASK",),
"source": ("MASK",),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"operation": (["multiply", "add", "subtract"],),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "combine"
def combine(self, destination, source, x, y, operation):
output = destination.clone()
left, top = (x, y,)
right, bottom = (min(left + source.shape[1], destination.shape[1]), min(top + source.shape[0], destination.shape[0]))
visible_width, visible_height = (right - left, bottom - top,)
source_portion = source[:visible_height, :visible_width]
destination_portion = destination[top:bottom, left:right]
if operation == "multiply":
output[top:bottom, left:right] = destination_portion * source_portion
elif operation == "add":
output[top:bottom, left:right] = destination_portion + source_portion
elif operation == "subtract":
output[top:bottom, left:right] = destination_portion - source_portion
output = torch.clamp(output, 0.0, 1.0)
return (output,)
class FeatherMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"mask": ("MASK",),
"left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "feather"
def feather(self, mask, left, top, right, bottom):
output = mask.clone()
left = min(left, output.shape[1])
right = min(right, output.shape[1])
top = min(top, output.shape[0])
bottom = min(bottom, output.shape[0])
for x in range(left):
feather_rate = (x + 1.0) / left
output[:, x] *= feather_rate
for x in range(right):
feather_rate = (x + 1) / right
output[:, -(x + 1)] *= feather_rate # -x alone would hit column 0 when x == 0
for y in range(top):
feather_rate = (y + 1) / top
output[y, :] *= feather_rate
for y in range(bottom):
feather_rate = (y + 1) / bottom
output[-(y + 1), :] *= feather_rate # -y alone would hit row 0 when y == 0
return (output,)
NODE_CLASS_MAPPINGS = {
"LatentCompositeMasked": LatentCompositeMasked,
"MaskToImage": MaskToImage,
"ImageToMask": ImageToMask,
"SolidMask": SolidMask,
"InvertMask": InvertMask,
"CropMask": CropMask,
"MaskComposite": MaskComposite,
"FeatherMask": FeatherMask,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"ImageToMask": "Convert Image to Mask",
"MaskToImage": "Convert Mask to Image",
}
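Since the node classes are plain Python, a hedged smoke test can call them directly (values are arbitrary):

```python
(mask,) = SolidMask().solid(value=1.0, width=512, height=512)
(mask,) = FeatherMask().feather(mask, left=64, top=0, right=64, bottom=0)
(image,) = MaskToImage().mask_to_image(mask)
print(image.shape)   # torch.Size([1, 512, 512, 3])
```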


@ -1,6 +1,6 @@
import os
from comfy_extras.chainner_models import model_loading
import model_management
from comfy import model_management
import torch
import comfy.utils
import folder_paths


@ -10,6 +10,8 @@ import gc
import torch
import nodes
import comfy.model_management
def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
valid_inputs = class_def.INPUT_TYPES()
input_data_all = {}
@ -202,10 +204,7 @@ class PromptExecutor:
self.server.send_sync("executing", { "node": None }, self.server.client_id)
gc.collect()
if torch.cuda.is_available():
if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
comfy.model_management.soft_empty_cache()
def validate_inputs(prompt, item):


@ -18,6 +18,7 @@ a111:
#other_ui:
# base_path: path/to/ui
# checkpoints: models/checkpoints
# gligen: models/gligen
# custom_nodes: path/custom_nodes


@ -12,8 +12,8 @@ except:
folder_names_and_paths = {}
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
base_path = os.path.dirname(os.path.realpath(__file__))
models_dir = os.path.join(base_path, "models")
folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_ckpt_extensions)
folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
@ -26,8 +26,13 @@ folder_names_and_paths["embeddings"] = ([os.path.join(models_dir, "embeddings")]
folder_names_and_paths["diffusers"] = ([os.path.join(models_dir, "diffusers")], ["folder"])
folder_names_and_paths["controlnet"] = ([os.path.join(models_dir, "controlnet"), os.path.join(models_dir, "t2i_adapter")], supported_pt_extensions)
folder_names_and_paths["gligen"] = ([os.path.join(models_dir, "gligen")], supported_pt_extensions)
folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions)
folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes")], [])
output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")

main.py (15 lines changed)

@ -81,6 +81,14 @@ if __name__ == "__main__":
server = server.PromptServer(loop)
q = execution.PromptQueue(server)
extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
if os.path.isfile(extra_model_paths_config_path):
load_extra_path_config(extra_model_paths_config_path)
if args.extra_model_paths_config:
for config_path in itertools.chain(*args.extra_model_paths_config):
load_extra_path_config(config_path)
init_custom_nodes()
server.add_routes()
hijack_progress(server)
@ -91,13 +99,6 @@ if __name__ == "__main__":
dont_print = args.dont_print_server
extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
if os.path.isfile(extra_model_paths_config_path):
load_extra_path_config(extra_model_paths_config_path)
if args.extra_model_paths_config:
for config_path in itertools.chain(*args.extra_model_paths_config):
load_extra_path_config(config_path)
if args.output_directory:
output_dir = os.path.abspath(args.output_directory)


nodes.py (133 lines changed)

@ -21,16 +21,16 @@ import comfy.utils
import comfy.clip_vision
import model_management
import comfy.model_management
import importlib
import folder_paths
def before_node_execution():
model_management.throw_exception_if_processing_interrupted()
comfy.model_management.throw_exception_if_processing_interrupted()
def interrupt_processing(value=True):
model_management.interrupt_current_processing(value)
comfy.model_management.interrupt_current_processing(value)
MAX_RESOLUTION=8192
@ -241,7 +241,7 @@ class DiffusersLoader:
model_path = os.path.join(search_path, model_path)
break
return comfy.diffusers_convert.load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
class unCLIPCheckpointLoader:
@ -490,6 +490,51 @@ class unCLIPConditioning:
c.append(n)
return (c, )
class GLIGENLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "gligen_name": (folder_paths.get_filename_list("gligen"), )}}
RETURN_TYPES = ("GLIGEN",)
FUNCTION = "load_gligen"
CATEGORY = "loaders"
def load_gligen(self, gligen_name):
gligen_path = folder_paths.get_full_path("gligen", gligen_name)
gligen = comfy.sd.load_gligen(gligen_path)
return (gligen,)
class GLIGENTextBoxApply:
@classmethod
def INPUT_TYPES(s):
return {"required": {"conditioning_to": ("CONDITIONING", ),
"clip": ("CLIP", ),
"gligen_textbox_model": ("GLIGEN", ),
"text": ("STRING", {"multiline": True}),
"width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
"height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "append"
CATEGORY = "conditioning/gligen"
def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
c = []
cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
for t in conditioning_to:
n = [t[0], t[1].copy()]
position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
prev = []
if "gligen" in n[1]:
prev = n[1]['gligen'][2]
n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
c.append(n)
return (c, )
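A hedged sketch of the pair in use outside the graph; `positive` and `clip` are assumed to come from the usual checkpoint loading, and the filename matches the notebook hunk below:

```python
(gligen_model,) = GLIGENLoader().load_gligen("gligen_sd14_textbox_pruned_fp16.safetensors")
(positive,) = GLIGENTextBoxApply().append(
    conditioning_to=positive, clip=clip, gligen_textbox_model=gligen_model,
    text="a cat", width=256, height=256, x=0, y=64)
# each call appends one (pooled_text, h//8, w//8, y//8, x//8) box to cond[1]['gligen']
```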
class EmptyLatentImage:
def __init__(self, device="cpu"):
@ -510,6 +555,24 @@ class EmptyLatentImage:
return ({"samples":latent}, )
class LatentFromBatch:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "rotate"
CATEGORY = "latent"
def rotate(self, samples, batch_index):
s = samples.copy()
s_in = samples["samples"]
batch_index = min(s_in.shape[0] - 1, batch_index)
s["samples"] = s_in[batch_index:batch_index + 1].clone()
s["batch_index"] = batch_index
return (s,)
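LatentFromBatch records which batch slot a latent came from; common_ksampler (below) then advances the CPU generator past batch_index single-sample draws before drawing the actual noise. A hedged sketch of the idea, assuming sequential single-sample draws line up with that slot's position in the stream:

```python
import torch

def noise_for_batch_index(seed, batch_index, sample_shape):
    # sample_shape: a single-sample latent shape, e.g. [1, 4, 64, 64]
    gen = torch.manual_seed(seed)
    for _ in range(batch_index):
        torch.randn(sample_shape, generator=gen, device="cpu")  # skip earlier slots
    return torch.randn(sample_shape, generator=gen, device="cpu")
```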
class LatentUpscale:
upscale_methods = ["nearest-exact", "bilinear", "area"]
@ -680,12 +743,19 @@ class SetLatentNoiseMask:
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
latent_image = latent["samples"]
noise_mask = None
device = model_management.get_torch_device()
device = comfy.model_management.get_torch_device()
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")
batch_index = 0
if "batch_index" in latent:
batch_index = latent["batch_index"]
generator = torch.manual_seed(seed)
for i in range(batch_index):
noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
if "noise_mask" in latent:
noise_mask = latent['noise_mask']
@ -696,7 +766,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
noise_mask = noise_mask.to(device)
real_model = None
model_management.load_model_gpu(model)
comfy.model_management.load_model_gpu(model)
real_model = model.model
noise = noise.to(device)
@ -706,27 +776,30 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
negative_copy = []
control_nets = []
def get_models(cond):
models = []
for c in cond:
if 'control' in c[1]:
models += [c[1]['control']]
if 'gligen' in c[1]:
models += [c[1]['gligen'][1]]
return models
for p in positive:
t = p[0]
if t.shape[0] < noise.shape[0]:
t = torch.cat([t] * noise.shape[0])
t = t.to(device)
if 'control' in p[1]:
control_nets += [p[1]['control']]
positive_copy += [[t] + p[1:]]
for n in negative:
t = n[0]
if t.shape[0] < noise.shape[0]:
t = torch.cat([t] * noise.shape[0])
t = t.to(device)
if 'control' in n[1]:
control_nets += [n[1]['control']]
negative_copy += [[t] + n[1:]]
control_net_models = []
for x in control_nets:
control_net_models += x.get_control_models()
model_management.load_controlnet_gpu(control_net_models)
models = get_models(positive) + get_models(negative)
comfy.model_management.load_controlnet_gpu(models)
if sampler_name in comfy.samplers.KSampler.SAMPLERS:
sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
@ -736,8 +809,8 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask)
samples = samples.cpu()
for c in control_nets:
c.cleanup()
for m in models:
m.cleanup()
out = latent.copy()
out["samples"] = samples
@ -871,7 +944,7 @@ class SaveImage:
"filename": file,
"subfolder": subfolder,
"type": self.type
});
})
counter += 1
return { "ui": { "images": results } }
@ -932,7 +1005,7 @@ class LoadImageMask:
"channel": (["alpha", "red", "green", "blue"], ),}
}
CATEGORY = "image"
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "load_image"
@ -1073,6 +1146,7 @@ NODE_CLASS_MAPPINGS = {
"VAELoader": VAELoader,
"EmptyLatentImage": EmptyLatentImage,
"LatentUpscale": LatentUpscale,
"LatentFromBatch": LatentFromBatch,
"SaveImage": SaveImage,
"PreviewImage": PreviewImage,
"LoadImage": LoadImage,
@ -1102,6 +1176,9 @@ NODE_CLASS_MAPPINGS = {
"VAEEncodeTiled": VAEEncodeTiled,
"TomePatchModel": TomePatchModel,
"unCLIPCheckpointLoader": unCLIPCheckpointLoader,
"GLIGENLoader": GLIGENLoader,
"GLIGENTextBoxApply": GLIGENTextBoxApply,
"CheckpointLoader": CheckpointLoader,
"DiffusersLoader": DiffusersLoader,
}
@ -1178,17 +1255,19 @@ def load_custom_node(module_path):
print(f"Cannot import {module_path} module for custom nodes:", e)
def load_custom_nodes():
CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
possible_modules = os.listdir(CUSTOM_NODE_PATH)
if "__pycache__" in possible_modules:
possible_modules.remove("__pycache__")
node_paths = folder_paths.get_folder_paths("custom_nodes")
for custom_node_path in node_paths:
possible_modules = os.listdir(custom_node_path)
if "__pycache__" in possible_modules:
possible_modules.remove("__pycache__")
for possible_module in possible_modules:
module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
load_custom_node(module_path)
for possible_module in possible_modules:
module_path = os.path.join(custom_node_path, possible_module)
if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
load_custom_node(module_path)
def init_custom_nodes():
load_custom_nodes()
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))


@ -119,14 +119,30 @@
"\n",
"\n",
"# ControlNet\n",
"#!wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_depth-fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_scribble-fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_openpose-fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_canny_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_lineart_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_openpose_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_scribble_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_seg_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_softedge_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors -P ./models/controlnet/\n",
"\n",
"\n",
"# Controlnet Preprocessor nodes by Fannovel16\n",
"#!cd custom_nodes && git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors; cd comfy_controlnet_preprocessors && python install.py\n",
"\n",
"\n",
"# GLIGEN\n",
"#!wget -c https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/resolve/main/gligen_sd14_textbox_pruned_fp16.safetensors -P ./models/gligen/\n",
"\n",
"\n",
"# ESRGAN upscale model\n",
"#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth -P ./models/upscale_models/\n",
"#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth -P ./models/upscale_models/\n",


@ -107,7 +107,7 @@ const colorPalettes = {
"descrip-text": "#444",
"drag-text": "#555",
"error-text": "#F44336",
"border-color": "#CCC"
"border-color": "#888"
}
},
},


@ -0,0 +1,144 @@
import { app } from "/scripts/app.js";
// Allows you to edit the attention weight by holding ctrl (or cmd) and using the up/down arrow keys
app.registerExtension({
name: "Comfy.EditAttention",
init() {
const editAttentionDelta = app.ui.settings.addSetting({
id: "Comfy.EditAttention.Delta",
name: "Ctrl+up/down precision",
type: "slider",
attrs: {
min: 0.01,
max: 0.5,
step: 0.01,
},
defaultValue: 0.05,
});
function incrementWeight(weight, delta) {
const floatWeight = parseFloat(weight);
if (isNaN(floatWeight)) return weight;
const newWeight = floatWeight + delta;
if (newWeight < 0) return "0";
return String(Number(newWeight.toFixed(10)));
}
function findNearestEnclosure(text, cursorPos) {
let start = cursorPos, end = cursorPos;
let openCount = 0, closeCount = 0;
// Find opening parenthesis before cursor
while (start >= 0) {
start--;
if (text[start] === "(" && openCount === closeCount) break;
if (text[start] === "(") openCount++;
if (text[start] === ")") closeCount++;
}
if (start < 0) return false;
openCount = 0;
closeCount = 0;
// Find closing parenthesis after cursor
while (end < text.length) {
if (text[end] === ")" && openCount === closeCount) break;
if (text[end] === "(") openCount++;
if (text[end] === ")") closeCount++;
end++;
}
if (end === text.length) return false;
return { start: start + 1, end: end };
}
function addWeightToParentheses(text) {
const parenRegex = /^\((.*)\)$/;
const parenMatch = text.match(parenRegex);
const floatRegex = /:([+-]?(\d*\.)?\d+([eE][+-]?\d+)?)/;
const floatMatch = text.match(floatRegex);
if (parenMatch && !floatMatch) {
return `(${parenMatch[1]}:1.0)`;
} else {
return text;
}
};
function editAttention(event) {
const inputField = event.composedPath()[0];
const delta = parseFloat(editAttentionDelta.value);
if (inputField.tagName !== "TEXTAREA") return;
if (!(event.key === "ArrowUp" || event.key === "ArrowDown")) return;
if (!event.ctrlKey && !event.metaKey) return;
event.preventDefault();
let start = inputField.selectionStart;
let end = inputField.selectionEnd;
let selectedText = inputField.value.substring(start, end);
// If there is no selection, attempt to find the nearest enclosure, or select the current word
if (!selectedText) {
const nearestEnclosure = findNearestEnclosure(inputField.value, start);
if (nearestEnclosure) {
start = nearestEnclosure.start;
end = nearestEnclosure.end;
selectedText = inputField.value.substring(start, end);
} else {
// Select the current word, find the start and end of the word
const delimiters = " .,\\/!?%^*;:{}=-_`~()\r\n\t";
while (!delimiters.includes(inputField.value[start - 1]) && start > 0) {
start--;
}
while (!delimiters.includes(inputField.value[end]) && end < inputField.value.length) {
end++;
}
selectedText = inputField.value.substring(start, end);
if (!selectedText) return;
}
}
// If the selection ends with a space, remove it
if (selectedText[selectedText.length - 1] === " ") {
selectedText = selectedText.substring(0, selectedText.length - 1);
end -= 1;
}
// If there are parentheses left and right of the selection, select them
if (inputField.value[start - 1] === "(" && inputField.value[end] === ")") {
start -= 1;
end += 1;
selectedText = inputField.value.substring(start, end);
}
// If the selection is not enclosed in parentheses, add them
if (selectedText[0] !== "(" || selectedText[selectedText.length - 1] !== ")") {
selectedText = `(${selectedText})`;
}
// If the selection does not have a weight, add a weight of 1.0
selectedText = addWeightToParentheses(selectedText);
// Increment the weight
const weightDelta = event.key === "ArrowUp" ? delta : -delta;
const updatedText = selectedText.replace(/\((.*):(\d+(?:\.\d+)?)\)/, (match, text, weight) => {
weight = incrementWeight(weight, weightDelta);
if (weight == 1) {
return text;
} else {
return `(${text}:${weight})`;
}
});
inputField.setRangeText(updatedText, start, end, "select");
}
window.addEventListener("keydown", editAttention);
},
});
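Pulled out of the DOM event handler, the edit above boils down to three string passes: wrap the selection, give it a default weight, then apply the delta. A minimal standalone sketch (bumpSelection is an assumed name for illustration; the regexes mirror the extension's):

// Illustrative only: reproduces the core transform of the extension above.
function incrementWeight(weight, delta) {
  const floatWeight = parseFloat(weight);
  if (isNaN(floatWeight)) return weight;
  const newWeight = floatWeight + delta;
  if (newWeight < 0) return "0";
  return String(Number(newWeight.toFixed(10)));
}

function bumpSelection(selectedText, delta) {
  // Wrap bare text in parentheses, as the handler does.
  if (selectedText[0] !== "(" || selectedText[selectedText.length - 1] !== ")") {
    selectedText = `(${selectedText})`;
  }
  // Give weightless groups an explicit :1.0, mirroring addWeightToParentheses.
  if (!/:([+-]?(\d*\.)?\d+([eE][+-]?\d+)?)/.test(selectedText)) {
    selectedText = selectedText.replace(/^\((.*)\)$/, "($1:1.0)");
  }
  // Apply the delta and drop the weight entirely when it lands on 1.
  return selectedText.replace(/\((.*):(\d+(?:\.\d+)?)\)/, (match, text, weight) => {
    weight = incrementWeight(weight, delta);
    return weight == 1 ? text : `(${text}:${weight})`;
  });
}

console.log(bumpSelection("masterpiece", 0.05)); // (masterpiece:1.05)
console.log(bumpSelection("(sky:1.1)", -0.05));  // (sky:1.05)
console.log(bumpSelection("(sky:1.05)", -0.05)); // sky  (weight 1 is dropped)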


@ -0,0 +1,76 @@
import { app } from "/scripts/app.js";
const id = "Comfy.Keybinds";
app.registerExtension({
name: id,
init() {
const keybindListener = function(event) {
const modifierPressed = event.ctrlKey || event.metaKey;
// Queue prompt using ctrl or command + enter
if (modifierPressed && (event.key === "Enter" || event.keyCode === 13 || event.keyCode === 10)) {
app.queuePrompt(event.shiftKey ? -1 : 0);
return;
}
const target = event.composedPath()[0];
if (target.tagName === "INPUT" || target.tagName === "TEXTAREA") {
return;
}
const modifierKeyIdMap = {
"s": "#comfy-save-button",
83: "#comfy-save-button",
"o": "#comfy-file-input",
79: "#comfy-file-input",
"Backspace": "#comfy-clear-button",
8: "#comfy-clear-button",
"Delete": "#comfy-clear-button",
46: "#comfy-clear-button",
"d": "#comfy-load-default-button",
68: "#comfy-load-default-button",
};
const modifierKeybindId = modifierKeyIdMap[event.key] || modifierKeyIdMap[event.keyCode];
if (modifierPressed && modifierKeybindId) {
event.preventDefault();
const elem = document.querySelector(modifierKeybindId);
elem.click();
return;
}
// Finished handling all modifier keybinds, now handle the rest
if (event.ctrlKey || event.altKey || event.metaKey) {
return;
}
// Close out of modals using escape
if (event.key === "Escape" || event.keyCode === 27) {
const modals = document.querySelectorAll(".comfy-modal");
const modal = Array.from(modals).find(modal => window.getComputedStyle(modal).getPropertyValue("display") !== "none");
if (modal) {
modal.style.display = "none";
}
}
const keyIdMap = {
"q": "#comfy-view-queue-button",
81: "#comfy-view-queue-button",
"h": "#comfy-view-history-button",
72: "#comfy-view-history-button",
"r": "#comfy-refresh-button",
82: "#comfy-refresh-button",
};
const buttonId = keyIdMap[event.key] || keyIdMap[event.keyCode];
if (buttonId) {
const button = document.querySelector(buttonId);
button.click();
}
}
window.addEventListener("keydown", keybindListener, true);
}
});
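Each binding appears twice in the maps above because the listener falls back from event.key to the legacy event.keyCode. A runnable sketch of that two-step lookup (the stub event objects are for illustration):

// Sketch of the lookup used by keybindListener above.
const keyIdMap = {
  "q": "#comfy-view-queue-button",
  81: "#comfy-view-queue-button", // legacy keyCode for "q"
};

function resolveButton(event) {
  // Prefer the modern event.key, fall back to event.keyCode.
  return keyIdMap[event.key] || keyIdMap[event.keyCode];
}

console.log(resolveButton({ key: "q" }));    // "#comfy-view-queue-button"
console.log(resolveButton({ keyCode: 81 })); // same target, legacy path
console.log(resolveButton({ key: "x" }));    // undefined -> no button clicked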


@ -0,0 +1,41 @@
import {app} from "../../scripts/app.js";
import {ComfyWidgets} from "../../scripts/widgets.js";
// Node that adds notes to your project
app.registerExtension({
name: "Comfy.NoteNode",
registerCustomNodes() {
class NoteNode {
color=LGraphCanvas.node_colors.yellow.color;
bgcolor=LGraphCanvas.node_colors.yellow.bgcolor;
groupcolor = LGraphCanvas.node_colors.yellow.groupcolor;
constructor() {
if (!this.properties) {
this.properties = {};
this.properties.text="";
}
ComfyWidgets.STRING(this, "", ["", {default:this.properties.text, multiline: true}], app)
this.serialize_widgets = true;
this.isVirtualNode = true;
}
}
// Load default visibility
LiteGraph.registerNodeType(
"Note",
Object.assign(NoteNode, {
title_mode: LiteGraph.NORMAL_TITLE,
title: "Note",
collapsable: true,
})
);
NoteNode.category = "utils";
},
});
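The same registerCustomNodes hook works for any purely frontend node: set isVirtualNode so the node is never sent to the backend, and serialize its widgets so the text survives saving. A hedged sketch of a hypothetical variant (the node name is invented, not a shipped node):

import { app } from "/scripts/app.js";
import { ComfyWidgets } from "/scripts/widgets.js";

app.registerExtension({
  name: "Example.TodoNode", // assumed name for illustration
  registerCustomNodes() {
    class TodoNode {
      constructor() {
        if (!this.properties) {
          this.properties = { text: "" };
        }
        // Same multiline string widget the NoteNode above uses.
        ComfyWidgets.STRING(this, "", ["", { default: this.properties.text, multiline: true }], app);
        this.serialize_widgets = true; // persist widget values in the workflow
        this.isVirtualNode = true;     // frontend-only, never queued
      }
    }
    LiteGraph.registerNodeType("Todo", Object.assign(TodoNode, { title: "Todo" }));
    TodoNode.category = "utils";
  },
});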


@ -1,21 +1,72 @@
import { app } from "/scripts/app.js";
import { ComfyWidgets } from "/scripts/widgets.js";
// Adds defaults for quickly adding nodes with middle click on the input/output
app.registerExtension({
name: "Comfy.SlotDefaults",
suggestionsNumber: null,
init() {
LiteGraph.middle_click_slot_add_default_node = true;
LiteGraph.slot_types_default_in = {
MODEL: "CheckpointLoaderSimple",
LATENT: "EmptyLatentImage",
VAE: "VAELoader",
};
LiteGraph.slot_types_default_out = {
LATENT: "VAEDecode",
IMAGE: "SaveImage",
CLIP: "CLIPTextEncode",
};
this.suggestionsNumber = app.ui.settings.addSetting({
id: "Comfy.NodeSuggestions.number",
name: "number of nodes suggestions",
type: "slider",
attrs: {
min: 1,
max: 100,
step: 1,
},
defaultValue: 5,
onChange: (newVal, oldVal) => {
this.setDefaults(newVal);
}
});
},
slot_types_default_out: {},
slot_types_default_in: {},
async beforeRegisterNodeDef(nodeType, nodeData, app) {
var nodeId = nodeData.name;
var inputs = [];
inputs = nodeData["input"]["required"]; // only show required inputs to reduce clutter; creating a node from its optional inputs isn't logical
for (const inputKey in inputs) {
var input = (inputs[inputKey]);
if (typeof input[0] !== "string") continue;
var type = input[0]
if (type in ComfyWidgets) {
var customProperties = input[1]
if (!(customProperties?.forceInput)) continue; //ignore widgets that don't force input
}
if (!(type in this.slot_types_default_out)) {
this.slot_types_default_out[type] = ["Reroute"];
}
if (this.slot_types_default_out[type].includes(nodeId)) continue;
this.slot_types_default_out[type].push(nodeId);
}
var outputs = nodeData["output"];
for (const key in outputs) {
var type = outputs[key];
if (!(type in this.slot_types_default_in)) {
this.slot_types_default_in[type] = ["Reroute"];// ["Reroute", "Primitive"]; primitive doesn't always work :'()
}
this.slot_types_default_in[type].push(nodeId);
}
var maxNum = this.suggestionsNumber.value;
this.setDefaults(maxNum);
},
setDefaults(maxNum) {
LiteGraph.slot_types_default_out = {};
LiteGraph.slot_types_default_in = {};
for (const type in this.slot_types_default_out) {
LiteGraph.slot_types_default_out[type] = this.slot_types_default_out[type].slice(0, maxNum);
}
for (const type in this.slot_types_default_in) {
LiteGraph.slot_types_default_in[type] = this.slot_types_default_in[type].slice(0, maxNum);
}
}
});
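After beforeRegisterNodeDef has run over every node definition, setDefaults copies at most maxNum suggestions per slot type into LiteGraph. The registries are plain type-to-node-name maps, each seeded with "Reroute"; an illustrative shape (entries are examples, not captured output):

// Roughly what LiteGraph.slot_types_default_in might hold after setDefaults(5):
const exampleDefaultsIn = {
  LATENT: ["Reroute", "EmptyLatentImage", "KSampler" /* ...up to 5 names */],
  IMAGE:  ["Reroute", "VAEDecode" /* ... */],
};
// Middle-clicking a LATENT input slot now offers these nodes, in order.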


@ -9,7 +9,7 @@ app.registerExtension({
app.ui.settings.addSetting({
id: "Comfy.SnapToGrid.GridSize",
name: "Grid Size",
type: "number",
type: "slider",
attrs: {
min: 1,
max: 500,


@ -1,4 +1,4 @@
import { ComfyWidgets, addRandomizeWidget } from "/scripts/widgets.js";
import { ComfyWidgets, addValueControlWidget } from "/scripts/widgets.js";
import { app } from "/scripts/app.js";
const CONVERTED_TYPE = "converted-widget";
@ -23,7 +23,7 @@ function hideWidget(node, widget, suffix = "") {
return widget.origSerializeValue ? widget.origSerializeValue() : widget.value;
};
// Hide any linked widgets, e.g. seed+randomize
// Hide any linked widgets, e.g. seed+seedControl
if (widget.linkedWidgets) {
for (const w of widget.linkedWidgets) {
hideWidget(node, w, ":" + widget.name);
@ -40,7 +40,7 @@ function showWidget(widget) {
delete widget.origComputeSize;
delete widget.origSerializeValue;
// Hide any linked widgets, e.g. seed+randomize
// Hide any linked widgets, e.g. seed+seedControl
if (widget.linkedWidgets) {
for (const w of widget.linkedWidgets) {
showWidget(w);
@ -159,27 +159,33 @@ app.registerExtension({
const r = origOnInputDblClick ? origOnInputDblClick.apply(this, arguments) : undefined;
const input = this.inputs[slot];
if (input.widget && !input[ignoreDblClick]) {
const node = LiteGraph.createNode("PrimitiveNode");
app.graph.add(node);
// Calculate a position that won't directly overlap another node
const pos = [this.pos[0] - node.size[0] - 30, this.pos[1]];
while (isNodeAtPos(pos)) {
pos[1] += LiteGraph.NODE_TITLE_HEIGHT;
if (!input.widget || !input[ignoreDblClick]) {
// Not a widget input or already handled input
if (!(input.type in ComfyWidgets) && !(input.widget.config?.[0] instanceof Array)) {
return r; //also Not a ComfyWidgets input or combo (do nothing)
}
node.pos = pos;
node.connect(0, this, slot);
node.title = input.name;
// Prevent adding duplicates due to triple clicking
input[ignoreDblClick] = true;
setTimeout(() => {
delete input[ignoreDblClick];
}, 300);
}
// Create a primitive node
const node = LiteGraph.createNode("PrimitiveNode");
app.graph.add(node);
// Calculate a position that won't directly overlap another node
const pos = [this.pos[0] - node.size[0] - 30, this.pos[1]];
while (isNodeAtPos(pos)) {
pos[1] += LiteGraph.NODE_TITLE_HEIGHT;
}
node.pos = pos;
node.connect(0, this, slot);
node.title = input.name;
// Prevent adding duplicates due to triple clicking
input[ignoreDblClick] = true;
setTimeout(() => {
delete input[ignoreDblClick];
}, 300);
return r;
};
},
@ -233,7 +239,9 @@ app.registerExtension({
// Fires before the link is made allowing us to reject it if it isn't valid
// No widget, we can't connect
if (!input.widget) return false;
if (!input.widget) {
if (!(input.type in ComfyWidgets)) return false;
}
if (this.outputs[slot].links?.length) {
return this.#isValidConnection(input);
@ -252,9 +260,17 @@ app.registerExtension({
const input = theirNode.inputs[link.target_slot];
if (!input) return;
const widget = input.widget;
const { type, linkType } = getWidgetType(widget.config);
var _widget;
if (!input.widget) {
if (!(input.type in ComfyWidgets)) return;
_widget = { "name": input.name, "config": [input.type, {}] }//fake widget
} else {
_widget = input.widget;
}
const widget = _widget;
const { type, linkType } = getWidgetType(widget.config);
// Update our output to restrict to the widget type
this.outputs[0].type = linkType;
this.outputs[0].name = type;
@ -274,7 +290,7 @@ app.registerExtension({
if (type in ComfyWidgets) {
widget = (ComfyWidgets[type](this, "value", inputData, app) || {}).widget;
} else {
widget = this.addWidget(type, "value", null, () => {}, {});
widget = this.addWidget(type, "value", null, () => { }, {});
}
if (node?.widgets && widget) {
@ -285,7 +301,7 @@ app.registerExtension({
}
if (widget.type === "number") {
addRandomizeWidget(this, widget, "Random after every gen");
addValueControlWidget(this, widget, "fixed");
}
// When our value changes, update other widgets to reflect our changes
@ -319,7 +335,20 @@ app.registerExtension({
const config1 = this.outputs[0].widget.config;
const config2 = input.widget.config;
if (config1[0] !== config2[0]) return false;
if (config1[0] instanceof Array) {
// These checks shouldn't actually be necessary as the types should match,
// but double-checking doesn't hurt
// New input isn't a combo
if (!(config2[0] instanceof Array)) return false;
// New input combo has a different size
if (config1[0].length !== config2[0].length) return false;
// New input combo has different elements
if (config1[0].find((v, i) => config2[0][i] !== v)) return false;
} else if (config1[0] !== config2[0]) {
// Configs don't match
return false;
}
for (const k in config1[1]) {
if (k !== "default") {
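Outside the node class, the combo check above is a small pure function. A standalone sketch with invented sample configs (the upstream check shares the quirk noted in the last comment):

// Mirrors the validity check above for widget configs.
function configsMatch(config1, config2) {
  if (config1[0] instanceof Array) {
    if (!(config2[0] instanceof Array)) return false;          // new input isn't a combo
    if (config1[0].length !== config2[0].length) return false; // combos differ in size
    if (config1[0].find((v, i) => config2[0][i] !== v)) return false; // elements differ
    return true;
  }
  return config1[0] === config2[0]; // plain widget types must match exactly
}

console.log(configsMatch([["euler", "ddim"], {}], [["euler", "ddim"], {}])); // true
console.log(configsMatch([["euler", "ddim"], {}], [["euler", "lcm"], {}]));  // false
console.log(configsMatch([["INT"], {}], ["INT", {}]));                       // false
// Quirk: find() returns the mismatching element itself, so a falsy combo
// value (e.g. an empty string) would not be detected as a difference.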


@ -142,6 +142,8 @@
pointerevents_method: "pointer", // "mouse"|"pointer" use mouse for retrocompatibility issues? (none found @ now)
// TODO implement pointercancel, gotpointercapture, lostpointercapture, (pointerover, pointerout if necessary)
ctrl_shift_v_paste_connect_unselected_outputs: true, //[true!] allows ctrl + shift + v to paste nodes with the outputs of the unselected nodes connected with the inputs of the newly pasted nodes
/**
* Register a node class so it can be listed when the user wants to create a new one
* @method registerNodeType
@ -253,13 +255,18 @@
* @param {String|Object} type name of the node or the node constructor itself
*/
unregisterNodeType: function(type) {
var base_class = type.constructor === String ? this.registered_node_types[type] : type;
if(!base_class)
throw("node type not found: " + type );
delete this.registered_node_types[base_class.type];
if(base_class.constructor.name)
delete this.Nodes[base_class.constructor.name];
},
const base_class =
type.constructor === String
? this.registered_node_types[type]
: type;
if (!base_class) {
throw "node type not found: " + type;
}
delete this.registered_node_types[base_class.type];
if (base_class.constructor.name) {
delete this.Nodes[base_class.constructor.name];
}
},
/**
* Save a slot type and his node
@ -267,38 +274,49 @@
* @param {String|Object} type name of the node or the node constructor itself
* @param {String} slot_type name of the slot type (variable type), eg. string, number, array, boolean, ..
*/
registerNodeAndSlotType: function(type,slot_type,out){
registerNodeAndSlotType: function(type, slot_type, out){
out = out || false;
var base_class = type.constructor === String && this.registered_node_types[type] !== "anonymous" ? this.registered_node_types[type] : type;
var sCN = base_class.constructor.type;
if (typeof slot_type == "string"){
var aTypes = slot_type.split(",");
}else if (slot_type == this.EVENT || slot_type == this.ACTION){
var aTypes = ["_event_"];
}else{
var aTypes = ["*"];
const base_class =
type.constructor === String &&
this.registered_node_types[type] !== "anonymous"
? this.registered_node_types[type]
: type;
const class_type = base_class.constructor.type;
let allTypes = [];
if (typeof slot_type === "string") {
allTypes = slot_type.split(",");
} else if (slot_type == this.EVENT || slot_type == this.ACTION) {
allTypes = ["_event_"];
} else {
allTypes = ["*"];
}
for (var i = 0; i < aTypes.length; ++i) {
var sT = aTypes[i]; //.toLowerCase();
if (sT === ""){
sT = "*";
for (let i = 0; i < allTypes.length; ++i) {
let slotType = allTypes[i];
if (slotType === "") {
slotType = "*";
}
var registerTo = out ? "registered_slot_out_types" : "registered_slot_in_types";
if (typeof this[registerTo][sT] == "undefined") this[registerTo][sT] = {nodes: []};
this[registerTo][sT].nodes.push(sCN);
const registerTo = out
? "registered_slot_out_types"
: "registered_slot_in_types";
if (this[registerTo][slotType] === undefined) {
this[registerTo][slotType] = { nodes: [] };
}
if (!this[registerTo][slotType].nodes.includes(class_type)) {
this[registerTo][slotType].nodes.push(class_type);
}
// check if this is a new type
if (!out){
if (!this.slot_types_in.includes(sT.toLowerCase())){
this.slot_types_in.push(sT.toLowerCase());
if (!out) {
if (!this.slot_types_in.includes(slotType.toLowerCase())) {
this.slot_types_in.push(slotType.toLowerCase());
this.slot_types_in.sort();
}
}else{
if (!this.slot_types_out.includes(sT.toLowerCase())){
this.slot_types_out.push(sT.toLowerCase());
} else {
if (!this.slot_types_out.includes(slotType.toLowerCase())) {
this.slot_types_out.push(slotType.toLowerCase());
this.slot_types_out.sort();
}
}
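The rewritten registerNodeAndSlotType keeps two structures in sync: a per-type registry of node class names (now deduplicated by the includes() guard) and a sorted, lowercased index of known slot types. An illustrative shape (the node and type names are examples):

// Roughly what the registries hold after a few registrations:
const registered_slot_in_types = {
  "*":     { nodes: ["Reroute"] },
  "IMAGE": { nodes: ["SaveImage"] }, // one entry per class, even if re-registered
};
const slot_types_in = ["*", "image"]; // lowercased and kept sorted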
@ -1616,7 +1634,8 @@
var nRet = null;
for (var i = nodes_list.length - 1; i >= 0; i--) {
var n = nodes_list[i];
if (n.isPointInside(x, y, margin)) {
var skip_title = n.constructor.title_mode == LiteGraph.NO_TITLE;
if (n.isPointInside(x, y, margin, skip_title)) {
// check for lesser interest nodes (TODO check for overlapping, use the top)
/*if (typeof n == "LGraphGroup"){
nRet = n;
@ -3967,8 +3986,8 @@
var aSource = (type+"").toLowerCase().split(",");
var aDest = aSlots[i].type=="0"||aSlots[i].type=="*"?"0":aSlots[i].type;
aDest = (aDest+"").toLowerCase().split(",");
for(sI=0;sI<aSource.length;sI++){
for(dI=0;dI<aDest.length;dI++){
for(var sI=0;sI<aSource.length;sI++){
for(var dI=0;dI<aDest.length;dI++){
if (aSource[sI]=="_event_") aSource[sI] = LiteGraph.EVENT;
if (aDest[sI]=="_event_") aDest[sI] = LiteGraph.EVENT;
if (aSource[sI]=="*") aSource[sI] = 0;
@ -3987,8 +4006,8 @@
var aSource = (type+"").toLowerCase().split(",");
var aDest = aSlots[i].type=="0"||aSlots[i].type=="*"?"0":aSlots[i].type;
aDest = (aDest+"").toLowerCase().split(",");
for(sI=0;sI<aSource.length;sI++){
for(dI=0;dI<aDest.length;dI++){
for(var sI=0;sI<aSource.length;sI++){
for(var dI=0;dI<aDest.length;dI++){
if (aSource[sI]=="*") aSource[sI] = 0;
if (aDest[sI]=="*") aDest[sI] = 0;
if (aSource[sI] == aDest[dI]) {
@ -4019,7 +4038,7 @@
if (target_node && target_node.constructor === Number) {
target_node = this.graph.getNodeById(target_node);
}
target_slot = target_node.findInputSlotByType(target_slotType, false, true);
var target_slot = target_node.findInputSlotByType(target_slotType, false, true);
if (target_slot >= 0 && target_slot !== null){
//console.debug("CONNbyTYPE type "+target_slotType+" for "+target_slot)
return this.connect(slot, target_node, target_slot);
@ -4072,7 +4091,7 @@
if (source_node && source_node.constructor === Number) {
source_node = this.graph.getNodeById(source_node);
}
source_slot = source_node.findOutputSlotByType(source_slotType, false, true);
var source_slot = source_node.findOutputSlotByType(source_slotType, false, true);
if (source_slot >= 0 && source_slot !== null){
//console.debug("CONNbyTYPE OUT! type "+source_slotType+" for "+source_slot)
return source_node.connect(source_slot, this, slot);
@ -5184,6 +5203,7 @@ LGraphNode.prototype.executeAction = function(action)
this.editor_alpha = 1; //used for transition
this.pause_rendering = false;
this.clear_background = true;
this.clear_background_color = "#222";
this.read_only = false; //if set to true users cannot modify the graph
this.render_only_selected = true;
@ -6986,7 +7006,7 @@ LGraphNode.prototype.executeAction = function(action)
block_default = true;
}
if (e.code == "KeyC" && (e.metaKey || e.ctrlKey) && !e.shiftKey) {
if ((e.keyCode === 67) && (e.metaKey || e.ctrlKey) && !e.shiftKey) {
//copy
if (this.selected_nodes) {
this.copyToClipboard();
@ -6994,9 +7014,9 @@ LGraphNode.prototype.executeAction = function(action)
}
}
if (e.code == "KeyV" && (e.metaKey || e.ctrlKey) && !e.shiftKey) {
if ((e.keyCode === 86) && (e.metaKey || e.ctrlKey)) {
//paste
this.pasteFromClipboard();
this.pasteFromClipboard(e.shiftKey);
}
//delete or backspace
@ -7081,15 +7101,15 @@ LGraphNode.prototype.executeAction = function(action)
var target_node = this.graph.getNodeById(
link_info.origin_id
);
if (!target_node || !this.selected_nodes[target_node.id]) {
//improve this by allowing connections to non-selected nodes
if (!target_node) {
continue;
} //not selected
}
clipboard_info.links.push([
target_node._relative_id,
link_info.origin_slot, //j,
node._relative_id,
link_info.target_slot
link_info.target_slot,
target_node.id
]);
}
}
@ -7100,7 +7120,11 @@ LGraphNode.prototype.executeAction = function(action)
);
};
LGraphCanvas.prototype.pasteFromClipboard = function() {
LGraphCanvas.prototype.pasteFromClipboard = function(isConnectUnselected = false) {
// if ctrl + shift + v is off, return when isConnectUnselected is true (shift is pressed) to maintain old behavior
if (!LiteGraph.ctrl_shift_v_paste_connect_unselected_outputs && isConnectUnselected) {
return;
}
var data = localStorage.getItem("litegrapheditor_clipboard");
if (!data) {
return;
@ -7149,7 +7173,16 @@ LGraphNode.prototype.executeAction = function(action)
//create links
for (var i = 0; i < clipboard_info.links.length; ++i) {
var link_info = clipboard_info.links[i];
var origin_node = nodes[link_info[0]];
var origin_node;
var origin_node_relative_id = link_info[0];
if (origin_node_relative_id != null) {
origin_node = nodes[origin_node_relative_id];
} else if (LiteGraph.ctrl_shift_v_paste_connect_unselected_outputs && isConnectUnselected) {
var origin_node_id = link_info[4];
if (origin_node_id) {
origin_node = this.graph.getNodeById(origin_node_id);
}
}
var target_node = nodes[link_info[2]];
if( origin_node && target_node )
origin_node.connect(link_info[1], target_node, link_info[3]);
@ -8212,6 +8245,17 @@ LGraphNode.prototype.executeAction = function(action)
this.ds.toCanvasContext(ctx);
//render BG
if ( this.ds.scale < 1.5 && !bg_already_painted && this.clear_background_color )
{
ctx.fillStyle = this.clear_background_color;
ctx.fillRect(
this.visible_area[0],
this.visible_area[1],
this.visible_area[2],
this.visible_area[3]
);
}
if (
this.background_image &&
this.ds.scale > 0.5 &&
@ -12274,7 +12318,7 @@ LGraphNode.prototype.executeAction = function(action)
var aProps = LiteGraph.availableCanvasOptions;
aProps.sort();
for(pI in aProps){
for(var pI in aProps){
var pX = aProps[pI];
panel.addWidget( "boolean", pX, graphcanvas[pX], {key: pX, on: "True", off: "False"}, fUpdate);
}


@ -4,27 +4,48 @@ import { api } from "./api.js";
import { defaultGraph } from "./defaultGraph.js";
import { getPngMetadata, importA1111 } from "./pnginfo.js";
class ComfyApp {
/**
* List of {number, batchCount} entries to queue
/**
* @typedef {import("types/comfy").ComfyExtension} ComfyExtension
*/
export class ComfyApp {
/**
* List of entries to queue
* @type {{number: number, batchCount: number}[]}
*/
#queueItems = [];
/**
* If the queue is currently being processed
* @type {boolean}
*/
#processingQueue = false;
constructor() {
this.ui = new ComfyUI(this);
/**
* List of extensions that are registered with the app
* @type {ComfyExtension[]}
*/
this.extensions = [];
/**
* Stores the execution output data for each node
* @type {Record<string, any>}
*/
this.nodeOutputs = {};
/**
* If the shift key on the keyboard is pressed
* @type {boolean}
*/
this.shiftDown = false;
}
/**
* Invoke an extension callback
* @param {string} method The extension callback to execute
* @param {...any} args Any arguments to pass to the callback
* @param {keyof ComfyExtension} method The extension callback to execute
* @param {any[]} args Any arguments to pass to the callback
* @returns
*/
#invokeExtensions(method, ...args) {
@ -691,11 +712,6 @@ class ComfyApp {
#addKeyboardHandler() {
window.addEventListener("keydown", (e) => {
this.shiftDown = e.shiftKey;
// Queue prompt using ctrl or command + enter
if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) {
this.queuePrompt(e.shiftKey ? -1 : 0);
}
});
window.addEventListener("keyup", (e) => {
this.shiftDown = e.shiftKey;
@ -950,6 +966,15 @@ class ComfyApp {
}
}
}
if (node.type == "KSampler" || node.type == "KSamplerAdvanced" || node.type == "PrimitiveNode") {
if (widget.name == "control_after_generate") {
if (widget.value === true) {
widget.value = "randomize";
} else if (widget.value === false) {
widget.value = "fixed";
}
}
}
}
}
@ -1127,6 +1152,10 @@ class ComfyApp {
}
}
/**
* Registers a Comfy web extension with the app
* @param {ComfyExtension} extension
*/
registerExtension(extension) {
if (!extension.name) {
throw new Error("Extensions must have a 'name' property.");
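The only hard requirement registerExtension enforces is the name property; every ComfyExtension callback is optional. A minimal hedged example (the extension name is invented):

import { app } from "/scripts/app.js";

app.registerExtension({
  name: "Example.HelloLogger", // assumed name for illustration
  async setup(app) {
    // Runs once the application is fully set up and running.
    console.log("ComfyUI ready, nodes on graph:", app.graph._nodes.length);
  },
});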


@ -131,6 +131,7 @@ export async function importA1111(graph, parameters) {
}
function replaceEmbeddings(text) {
if(!embeddings.length) return text;
return text.replaceAll(
new RegExp(
"\\b(" + embeddings.map((e) => e.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")).join("\\b|\\b") + ")\\b",


@ -270,6 +270,30 @@ class ComfySettingsDialog extends ComfyDialog {
]),
]);
break;
case "slider":
element = $el("div", [
$el("label", { textContent: name }, [
$el("input", {
type: "range",
value,
oninput: (e) => {
setter(e.target.value);
e.target.nextElementSibling.value = e.target.value;
},
...attrs
}),
$el("input", {
type: "number",
value,
oninput: (e) => {
setter(e.target.value);
e.target.previousElementSibling.value = e.target.value;
},
...attrs
}),
]),
]);
break;
default:
console.warn("Unsupported setting type, defaulting to text");
element = $el("div", [
@ -431,7 +455,15 @@ export class ComfyUI {
defaultValue: true,
});
const promptFilename = this.settings.addSetting({
id: "Comfy.PromptFilename",
name: "Prompt for filename when saving workflow",
type: "boolean",
defaultValue: true,
});
const fileInput = $el("input", {
id: "comfy-file-input",
type: "file",
accept: ".json,image/png",
style: { display: "none" },
@ -448,6 +480,7 @@ export class ComfyUI {
$el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }),
]),
$el("button.comfy-queue-btn", {
id: "queue-button",
textContent: "Queue Prompt",
onclick: () => app.queuePrompt(0, this.batchCount),
}),
@ -496,9 +529,10 @@ export class ComfyUI {
]),
]),
$el("div.comfy-menu-btns", [
$el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }),
$el("button", { id: "queue-front-button", textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }),
$el("button", {
$: (b) => (this.queue.button = b),
id: "comfy-view-queue-button",
textContent: "View Queue",
onclick: () => {
this.history.hide();
@ -507,6 +541,7 @@ export class ComfyUI {
}),
$el("button", {
$: (b) => (this.history.button = b),
id: "comfy-view-history-button",
textContent: "View History",
onclick: () => {
this.queue.hide();
@ -517,14 +552,23 @@ export class ComfyUI {
this.queue.element,
this.history.element,
$el("button", {
id: "comfy-save-button",
textContent: "Save",
onclick: () => {
let filename = "workflow.json";
if (promptFilename.value) {
filename = prompt("Save workflow as:", filename);
if (!filename) return;
if (!filename.toLowerCase().endsWith(".json")) {
filename += ".json";
}
}
const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string
const blob = new Blob([json], { type: "application/json" });
const url = URL.createObjectURL(blob);
const a = $el("a", {
href: url,
download: "workflow.json",
download: filename,
style: { display: "none" },
parent: document.body,
});
@ -535,15 +579,15 @@ export class ComfyUI {
}, 0);
},
}),
$el("button", { textContent: "Load", onclick: () => fileInput.click() }),
$el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
$el("button", { textContent: "Clear", onclick: () => {
$el("button", { id: "comfy-load-button", textContent: "Load", onclick: () => fileInput.click() }),
$el("button", { id: "comfy-refresh-button", textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
$el("button", { id: "comfy-clear-button", textContent: "Clear", onclick: () => {
if (!confirmClear.value || confirm("Clear workflow?")) {
app.clean();
app.graph.clear();
}
}}),
$el("button", { textContent: "Load Default", onclick: () => {
$el("button", { id: "comfy-load-default-button", textContent: "Load Default", onclick: () => {
if (!confirmClear.value || confirm("Load default workflow?")) {
app.loadGraphData()
}


@ -10,37 +10,54 @@ function getNumberDefaults(inputData, defaultStep) {
return { val: defaultVal, config: { min, max, step: 10.0 * step } };
}
export function addRandomizeWidget(node, targetWidget, name, defaultValue = false) {
const randomize = node.addWidget("toggle", name, defaultValue, function (v) {}, {
on: "enabled",
off: "disabled",
serialize: false, // Don't include this in prompt.
});
export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) {
const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, {
values: ["fixed", "increment", "decrement", "randomize"],
serialize: false, // Don't include this in prompt.
});
valueControl.afterQueued = () => {
randomize.afterQueued = () => {
if (randomize.value) {
const min = targetWidget.options?.min;
let max = targetWidget.options?.max;
if (min != null || max != null) {
if (max) {
// limit max to something that javascript can handle
max = Math.min(1125899906842624, max);
}
targetWidget.value = Math.floor(Math.random() * ((max ?? 9999999999) - (min ?? 0) + 1) + (min ?? 0));
} else {
targetWidget.value = Math.floor(Math.random() * 1125899906842624);
}
var v = valueControl.value;
let min = targetWidget.options.min;
let max = targetWidget.options.max;
// limit to something that javascript can handle
max = Math.min(1125899906842624, max);
min = Math.max(-1125899906842624, min);
let range = (max - min) / (targetWidget.options.step / 10);
// adjust values based on valueControl behaviour
switch (v) {
case "fixed":
break;
case "increment":
targetWidget.value += targetWidget.options.step / 10;
break;
case "decrement":
targetWidget.value -= targetWidget.options.step / 10;
break;
case "randomize":
targetWidget.value = Math.floor(Math.random() * range) * (targetWidget.options.step / 10) + min;
default:
break;
}
};
return randomize;
}
/*check if values are over or under their respective
* ranges and set them to min or max.*/
if (targetWidget.value < min)
targetWidget.value = min;
if (targetWidget.value > max)
targetWidget.value = max;
}
return valueControl;
};
function seedWidget(node, inputName, inputData) {
const seed = ComfyWidgets.INT(node, inputName, inputData);
const randomize = addRandomizeWidget(node, seed.widget, "Random seed after every gen", true);
const seedControl = addValueControlWidget(node, seed.widget, "randomize");
seed.widget.linkedWidgets = [randomize];
return { widget: seed, randomize };
seed.widget.linkedWidgets = [seedControl];
return seed;
}
const MultilineSymbol = Symbol();
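Each queued prompt triggers afterQueued on the control widget, which mutates the target according to the selected mode. The arithmetic divides options.step by 10 because getNumberDefaults above stores steps scaled up by 10. A standalone sketch of the per-mode behaviour (the sample widget is invented):

// Invented target widget: effective step is options.step / 10 = 1.
const target = { value: 5, options: { min: 0, max: 100, step: 10 } };

function afterQueued(mode) {
  const step = target.options.step / 10;
  const min = Math.max(-1125899906842624, target.options.min);
  const max = Math.min(1125899906842624, target.options.max);
  switch (mode) {
    case "increment": target.value += step; break;
    case "decrement": target.value -= step; break;
    case "randomize":
      target.value = Math.floor(Math.random() * ((max - min) / step)) * step + min;
      break;
    // "fixed" leaves the value untouched.
  }
  // Clamp out-of-range values, as the widget code above does.
  target.value = Math.min(max, Math.max(min, target.value));
}

afterQueued("increment"); // 5 -> 6
afterQueued("decrement"); // 6 -> 5
afterQueued("randomize"); // a multiple of 1 in [0, 100)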


@ -160,9 +160,9 @@ body {
.comfy-list {
color: var(--descrip-text);
background-color: #333;
background-color: var(--comfy-menu-bg);
margin-bottom: 10px;
border-color: #4e4e4e;
border-color: var(--border-color);
border-style: solid;
}
@ -217,6 +217,14 @@ button.comfy-queue-btn {
z-index: 99;
}
.comfy-modal.comfy-settings input[type="range"] {
vertical-align: middle;
}
.comfy-modal.comfy-settings input[type="range"] + input[type="number"] {
width: 3.5em;
}
.comfy-modal input,
.comfy-modal select {
color: var(--input-text);

web/types/comfy.d.ts

@ -0,0 +1,78 @@
import { LGraphNode, IWidget } from "./litegraph";
import { ComfyApp } from "/scripts/app";
export interface ComfyExtension {
/**
* The name of the extension
*/
name: string;
/**
* Allows any initialisation, e.g. loading resources. Called after the canvas is created but before nodes are added
* @param app The ComfyUI app instance
*/
init(app: ComfyApp): Promise<void>;
/**
* Allows any additional setup, called after the application is fully set up and running
* @param app The ComfyUI app instance
*/
setup(app: ComfyApp): Promise<void>;
/**
* Called before nodes are registered with the graph
* @param defs The collection of node definitions, add custom ones or edit existing ones
* @param app The ComfyUI app instance
*/
addCustomNodeDefs(defs: Record<string, ComfyObjectInfo>, app: ComfyApp): Promise<void>;
/**
* Allows the extension to add custom widgets
* @param app The ComfyUI app instance
* @returns An array of {[widget name]: widget data}
*/
getCustomWidgets(
app: ComfyApp
): Promise<
Array<
Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
>
>;
/**
* Allows the extension to add additional handling to the node before it is registered with LGraph
* @param nodeType The node class (not an instance)
* @param nodeData The original node object info config object
* @param app The ComfyUI app instance
*/
beforeRegisterNodeDef(nodeType: typeof LGraphNode, nodeData: ComfyObjectInfo, app: ComfyApp): Promise<void>;
/**
* Allows the extension to register additional nodes with LGraph after standard nodes are added
* @param app The ComfyUI app instance
*/
registerCustomNodes(app: ComfyApp): Promise<void>;
/**
* Allows the extension to modify a node that has been reloaded onto the graph.
* If you break something in the backend and want to patch workflows in the frontend,
* this is the place to do it
* @param node The node that has been loaded
* @param app The ComfyUI app instance
*/
loadedGraphNode(node: LGraphNode, app: ComfyApp);
/**
* Allows the extension to run code after the constructor of the node
* @param node The node that has been created
* @param app The ComfyUI app instance
*/
nodeCreated(node: LGraphNode, app: ComfyApp);
}
export type ComfyObjectInfo = {
name: string;
display_name?: string;
description?: string;
category: string;
input?: {
required?: Record<string, ComfyObjectInfoConfig>;
optional?: Record<string, ComfyObjectInfoConfig>;
};
output?: string[];
output_name: string[];
};
export type ComfyObjectInfoConfig = [string | any[]] | [string | any[], any];
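Extensions themselves stay plain JavaScript; this declaration file mainly gives editors something to type-check against via JSDoc, the same way app.js now imports ComfyExtension with a @typedef. A hedged sketch (the extension is hypothetical, and at runtime every callback besides name is optional):

import { app } from "/scripts/app.js";

/** @type {Partial<import("types/comfy").ComfyExtension>} */
const ext = {
  name: "Example.Typed", // invented name
  async beforeRegisterNodeDef(nodeType, nodeData, app) {
    // nodeData is a ComfyObjectInfo: name, category and input/output metadata.
    console.log("registering", nodeData.name, "in category", nodeData.category);
  },
};

app.registerExtension(ext);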

web/types/litegraph.d.ts

File diff suppressed because it is too large