Merge remote-tracking branch 'upstream/master'

Silversith 2023-04-18 09:07:50 +02:00
commit b75466f6aa
33 changed files with 2827 additions and 282 deletions

.gitignore vendored

@ -5,8 +5,7 @@ input/
!input/example.png
models/
temp/
custom_nodes/
!custom_nodes/example_node.py.example
extra_model_paths.yaml
venv/
venv38/
.idea/
/.vs


@ -32,9 +32,28 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
## Shortcuts
- **Ctrl + A** select all nodes
- **Ctrl + M** mute/unmute selected nodes
- **Delete** or **Backspace** delete selected nodes
| Keybind | Explanation |
| - | - |
| Ctrl + Enter | Queue up current graph for generation |
| Ctrl + Shift + Enter | Queue up current graph as first for generation |
| Ctrl + S | Save workflow |
| Ctrl + O | Load workflow |
| Ctrl + A | Select all nodes |
| Ctrl + M | Mute/unmute selected nodes |
| Delete/Backspace | Delete selected nodes |
| Ctrl + Delete/Backspace | Delete the current graph |
| Space | Move the canvas around when held and moving the cursor |
| Ctrl/Shift + Click | Add clicked node to selection |
| Ctrl + C/Ctrl + V | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes) |
| Ctrl + C/Ctrl + Shift + V | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) |
| Shift + Drag | Move multiple selected nodes at the same time |
| Ctrl + D | Load default graph |
| Q | Toggle visibility of the queue |
| H | Toggle visibility of history |
| R | Refresh graph |
Ctrl can also be replaced with Cmd for macOS users
# Installing
@ -64,7 +83,7 @@ Put your VAE in: models/vae
At the time of writing, pytorch has issues with python versions higher than 3.10, so make sure your python/pip version is 3.10.
### AMD (Linux only)
### AMD GPUs (Linux only)
AMD users can install rocm and pytorch with pip if they aren't already installed. This is the command to install the stable version:
```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```
@ -92,6 +111,11 @@ Install the dependencies by opening your terminal inside the ComfyUI folder and:
After this you should have everything installed and can proceed to running ComfyUI.
### Others:
[Intel Arc](https://github.com/comfyanonymous/ComfyUI/discussions/476)
Mac/MPS: There is basic support in the code, but until someone writes installation instructions you are on your own.
### I already have another UI for Stable Diffusion installed, do I really have to install all of these dependencies?


@ -9,7 +9,7 @@ from typing import Optional, Any
from ldm.modules.diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention
import model_management
from comfy import model_management
from . import tomesd


@ -7,7 +7,7 @@ from einops import rearrange
from typing import Optional, Any
from ldm.modules.attention import MemoryEfficientCrossAttention
import model_management
from comfy import model_management
if model_management.xformers_enabled_vae():
import xformers


@ -24,7 +24,7 @@ except ImportError:
from torch import Tensor
from typing import List
import model_management
from comfy import model_management
def dynamic_slice(
x: Tensor,


@ -307,6 +307,15 @@ def should_use_fp16():
return True
def soft_empty_cache():
global xpu_available
if xpu_available:
torch.xpu.empty_cache()
elif torch.cuda.is_available():
if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
#TODO: might be cleaner to put this somewhere else
import threading


@ -3,7 +3,7 @@ from .k_diffusion import external as k_diffusion_external
from .extra_samplers import uni_pc
import torch
import contextlib
import model_management
from comfy import model_management
from .ldm.models.diffusion.ddim import DDIMSampler
from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
@ -211,7 +211,10 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
max_total_area = model_management.maximum_batch_area()
cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
return uncond + (cond - uncond) * cond_scale
if "sampler_cfg_function" in model_options:
return model_options["sampler_cfg_function"](cond, uncond, cond_scale)
else:
return uncond + (cond - uncond) * cond_scale
class CompVisVDenoiser(k_diffusion_external.DiscreteVDDPMDenoiser):


@ -4,7 +4,7 @@ import copy
import sd1_clip
import sd2_clip
import model_management
from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
@ -250,6 +250,9 @@ class ModelPatcher:
def set_model_tomesd(self, ratio):
self.model_options["transformer_options"]["tomesd"] = {"ratio": ratio}
def set_model_sampler_cfg_function(self, sampler_cfg_function):
self.model_options["sampler_cfg_function"] = sampler_cfg_function
def model_dtype(self):
return self.model.diffusion_model.dtype
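The two hunks above add a hook for overriding classifier-free guidance: `set_model_sampler_cfg_function` stores a callable in `model_options`, and `sampling_function` calls it with the batched cond/uncond predictions and the CFG scale. A minimal sketch of how the hook might be used, assuming `model` is a ModelPatcher returned by a checkpoint loader (the function name is illustrative):
```python
# Minimal sketch, assuming `model` is a ModelPatcher from a checkpoint loader.
# The hook receives the batched cond/uncond predictions plus the CFG scale and
# must return the combined noise prediction; this example just reproduces the
# default classifier-free guidance formula.
def my_cfg_function(cond, uncond, cond_scale):
    return uncond + (cond - uncond) * cond_scale

model.set_model_sampler_cfg_function(my_cfg_function)
```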
@ -372,10 +375,12 @@ class CLIP:
def clip_layer(self, layer_idx):
self.layer_idx = layer_idx
def encode(self, text):
def tokenize(self, text, return_word_ids=False):
return self.tokenizer.tokenize_with_weights(text, return_word_ids)
def encode_from_tokens(self, tokens):
if self.layer_idx is not None:
self.cond_stage_model.clip_layer(self.layer_idx)
tokens = self.tokenizer.tokenize_with_weights(text)
try:
self.patcher.patch_model()
cond = self.cond_stage_model.encode_token_weights(tokens)
@ -385,6 +390,10 @@ class CLIP:
raise e
return cond
def encode(self, text):
tokens = self.tokenize(text)
return self.encode_from_tokens(tokens)
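The `CLIP` class now splits encoding into `tokenize` and `encode_from_tokens`, with `encode` kept as a convenience wrapper. A hedged usage sketch, assuming `clip` is a loaded `CLIP` instance:
```python
# Sketch only; assumes `clip` is a loaded CLIP instance.
tokens = clip.tokenize("a photo of a cat")   # batched (token, weight) pairs
cond = clip.encode_from_tokens(tokens)       # equivalent to clip.encode("a photo of a cat")

# Callers that need per-word ids can ask for them explicitly:
tokens_with_ids = clip.tokenize("a photo of a cat", return_word_ids=True)
```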
class VAE:
def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
if config is None:


@ -2,6 +2,8 @@ import os
from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig
import torch
import traceback
import zipfile
class ClipTokenWeightEncoder:
def encode_token_weights(self, token_weight_pairs):
@ -170,6 +172,26 @@ def unescape_important(text):
text = text.replace("\0\2", "(")
return text
def safe_load_embed_zip(embed_path):
with zipfile.ZipFile(embed_path) as myzip:
names = list(filter(lambda a: "data/" in a, myzip.namelist()))
names.reverse()
for n in names:
with myzip.open(n) as myfile:
data = myfile.read()
number = len(data) // 4
length_embed = 1024 #sd2.x
if number < 768:
continue
if number % 768 == 0:
length_embed = 768 #sd1.x
num_embeds = number // length_embed
embed = torch.frombuffer(data, dtype=torch.float)
out = embed.reshape((num_embeds, length_embed)).clone()
del embed
return out
def load_embed(embedding_name, embedding_directory):
if isinstance(embedding_directory, str):
embedding_directory = [embedding_directory]
@ -194,19 +216,33 @@ def load_embed(embedding_name, embedding_directory):
embed_path = valid_file
if embed_path.lower().endswith(".safetensors"):
import safetensors.torch
embed = safetensors.torch.load_file(embed_path, device="cpu")
else:
if 'weights_only' in torch.load.__code__.co_varnames:
embed = torch.load(embed_path, weights_only=True, map_location="cpu")
embed_out = None
try:
if embed_path.lower().endswith(".safetensors"):
import safetensors.torch
embed = safetensors.torch.load_file(embed_path, device="cpu")
else:
embed = torch.load(embed_path, map_location="cpu")
if 'string_to_param' in embed:
values = embed['string_to_param'].values()
else:
values = embed.values()
return next(iter(values))
if 'weights_only' in torch.load.__code__.co_varnames:
try:
embed = torch.load(embed_path, weights_only=True, map_location="cpu")
except:
embed_out = safe_load_embed_zip(embed_path)
else:
embed = torch.load(embed_path, map_location="cpu")
except Exception as e:
print(traceback.format_exc())
print()
print("error loading embedding, skipping loading:", embedding_name)
return None
if embed_out is None:
if 'string_to_param' in embed:
values = embed['string_to_param'].values()
else:
values = embed.values()
embed_out = next(iter(values))
return embed_out
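For reference, a hypothetical call to the reworked `load_embed`; the file name and directory below are illustrative. It returns the embedding tensor, or `None` if every loading strategy (safetensors, `weights_only` torch load, zip fallback) failed:
```python
# Hypothetical usage; the file name and directory are illustrative.
embed = load_embed("my_embedding.pt", ["models/embeddings"])
if embed is not None:
    # Typically (num_vectors, 768) for SD1.x or (num_vectors, 1024) for SD2.x.
    print(embed.shape)
```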
class SD1Tokenizer:
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None):
@ -224,60 +260,97 @@ class SD1Tokenizer:
self.inv_vocab = {v: k for k, v in vocab.items()}
self.embedding_directory = embedding_directory
self.max_word_length = 8
self.embedding_identifier = "embedding:"
def _try_get_embedding(self, embedding_name:str):
'''
Takes a potential embedding name and tries to retrieve it.
Returns a tuple of (embedding, leftover string); the embedding can be None.
'''
embed = load_embed(embedding_name, self.embedding_directory)
if embed is None:
stripped = embedding_name.strip(',')
if len(stripped) < len(embedding_name):
embed = load_embed(stripped, self.embedding_directory)
return (embed, embedding_name[len(stripped):])
return (embed, "")
def tokenize_with_weights(self, text:str, return_word_ids=False):
'''
Takes a prompt and converts it to a list of (token, weight, word id) elements.
Tokens can be either integer token ids or precomputed CLIP tensors.
Word id values are unique per word and embedding, with id 0 reserved for non-word tokens.
The returned list has dimensions NxM, where M is the input size of CLIP.
'''
if self.pad_with_end:
pad_token = self.end_token
else:
pad_token = 0
def tokenize_with_weights(self, text):
text = escape_important(text)
parsed_weights = token_weights(text, 1.0)
#tokenize words
tokens = []
for t in parsed_weights:
to_tokenize = unescape_important(t[0]).replace("\n", " ").split(' ')
while len(to_tokenize) > 0:
word = to_tokenize.pop(0)
temp_tokens = []
embedding_identifier = "embedding:"
if word.startswith(embedding_identifier) and self.embedding_directory is not None:
embedding_name = word[len(embedding_identifier):].strip('\n')
embed = load_embed(embedding_name, self.embedding_directory)
for weighted_segment, weight in parsed_weights:
to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
to_tokenize = [x for x in to_tokenize if x != ""]
for word in to_tokenize:
#if we find an embedding, deal with the embedding
if word.startswith(self.embedding_identifier) and self.embedding_directory is not None:
embedding_name = word[len(self.embedding_identifier):].strip('\n')
embed, leftover = self._try_get_embedding(embedding_name)
if embed is None:
stripped = embedding_name.strip(',')
if len(stripped) < len(embedding_name):
embed = load_embed(stripped, self.embedding_directory)
if embed is not None:
to_tokenize.insert(0, embedding_name[len(stripped):])
if embed is not None:
if len(embed.shape) == 1:
temp_tokens += [(embed, t[1])]
else:
for x in range(embed.shape[0]):
temp_tokens += [(embed[x], t[1])]
print(f"warning, embedding:{embedding_name} does not exist, ignoring")
else:
print("warning, embedding:{} does not exist, ignoring".format(embedding_name))
elif len(word) > 0:
tt = self.tokenizer(word)["input_ids"][1:-1]
for x in tt:
temp_tokens += [(x, t[1])]
tokens_left = self.max_tokens_per_section - (len(tokens) % self.max_tokens_per_section)
if len(embed.shape) == 1:
tokens.append([(embed, weight)])
else:
tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
#if we accidentally have leftover text, continue parsing using leftover, else move on to next word
if leftover != "":
word = leftover
else:
continue
#parse word
tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])
#try not to split words in different sections
if tokens_left < len(temp_tokens) and len(temp_tokens) < (self.max_word_length):
for x in range(tokens_left):
tokens += [(self.end_token, 1.0)]
tokens += temp_tokens
#reshape token array to CLIP input size
batched_tokens = []
batch = [(self.start_token, 1.0, 0)]
batched_tokens.append(batch)
for i, t_group in enumerate(tokens):
#determine if we're going to try and keep the tokens in a single batch
is_large = len(t_group) >= self.max_word_length
out_tokens = []
for x in range(0, len(tokens), self.max_tokens_per_section):
o_token = [(self.start_token, 1.0)] + tokens[x:min(self.max_tokens_per_section + x, len(tokens))]
o_token += [(self.end_token, 1.0)]
if self.pad_with_end:
o_token +=[(self.end_token, 1.0)] * (self.max_length - len(o_token))
else:
o_token +=[(0, 1.0)] * (self.max_length - len(o_token))
while len(t_group) > 0:
if len(t_group) + len(batch) > self.max_length - 1:
remaining_length = self.max_length - len(batch) - 1
#break word in two and add end token
if is_large:
batch.extend([(t,w,i+1) for t,w in t_group[:remaining_length]])
batch.append((self.end_token, 1.0, 0))
t_group = t_group[remaining_length:]
#add end token and pad
else:
batch.append((self.end_token, 1.0, 0))
batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
#start new batch
batch = [(self.start_token, 1.0, 0)]
batched_tokens.append(batch)
else:
batch.extend([(t,w,i+1) for t,w in t_group])
t_group = []
out_tokens += [o_token]
#fill last batch
batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))
if not return_word_ids:
batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]
return batched_tokens
return out_tokens
def untokenize(self, token_weight_pair):
return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))
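An illustrative sketch of the new `tokenize_with_weights` output, assuming a tokenizer constructed with default settings and the bundled tokenizer files available:
```python
# Illustrative sketch; assumes the default tokenizer files are available.
tokenizer = SD1Tokenizer(embedding_directory="models/embeddings")
batches = tokenizer.tokenize_with_weights("a (red:1.2) ball", return_word_ids=True)

# Each batch has exactly max_length (77) entries of (token, weight, word_id),
# starting with the start token and padded with end/pad tokens; word_id 0 is
# reserved for the non-word start/end/pad entries.
first = batches[0]
print(len(first))   # 77
print(first[0])     # (start_token, 1.0, 0)
```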


@ -1,4 +1,4 @@
import sd1_clip
from comfy import sd1_clip
import torch
import os

comfy_extras/nodes_mask.py Normal file

@ -0,0 +1,262 @@
import torch
from nodes import MAX_RESOLUTION
class LatentCompositeMasked:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"destination": ("LATENT",),
"source": ("LATENT",),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
},
"optional": {
"mask": ("MASK",),
}
}
RETURN_TYPES = ("LATENT",)
FUNCTION = "composite"
CATEGORY = "latent"
def composite(self, destination, source, x, y, mask = None):
output = destination.copy()
destination = destination["samples"].clone()
source = source["samples"]
x = max(-source.shape[3] * 8, min(x, destination.shape[3] * 8))
y = max(-source.shape[2] * 8, min(y, destination.shape[2] * 8))
left, top = (x // 8, y // 8)
right, bottom = (left + source.shape[3], top + source.shape[2],)
if mask is None:
mask = torch.ones_like(source)
else:
mask = mask.clone()
mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear")
mask = mask.repeat((source.shape[0], source.shape[1], 1, 1))
# calculate the bounds of the source that will be overlapping the destination
# this prevents the source trying to overwrite latent pixels that are out of bounds
# of the destination
visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
mask = mask[:, :, :visible_height, :visible_width]
inverse_mask = torch.ones_like(mask) - mask
source_portion = mask * source[:, :, :visible_height, :visible_width]
destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
destination[:, :, top:bottom, left:right] = source_portion + destination_portion
output["samples"] = destination
return (output,)
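A small worked example of the visible-region math above, with illustrative latent sizes, showing how a source pasted partially off the right edge is clipped:
```python
# Worked example with illustrative sizes: destination latent 64x64 (512x512 px),
# source latent 32x32 (256x256 px), pasted at x=448 so it hangs off the right edge.
dest_w, src_w, x = 64, 32, 448
left = x // 8                              # 56
right = left + src_w                       # 88, past the destination's width of 64
visible_width = dest_w - left + min(0, x)  # 8 latent columns actually composited
```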
class MaskToImage:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"mask": ("MASK",),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("IMAGE",)
FUNCTION = "mask_to_image"
def mask_to_image(self, mask):
result = mask[None, :, :, None].expand(-1, -1, -1, 3)
return (result,)
class ImageToMask:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("IMAGE",),
"channel": (["red", "green", "blue"],),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "image_to_mask"
def image_to_mask(self, image, channel):
channels = ["red", "green", "blue"]
mask = image[0, :, :, channels.index(channel)]
return (mask,)
class SolidMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"value": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
"width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
"height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "solid"
def solid(self, value, width, height):
out = torch.full((height, width), value, dtype=torch.float32, device="cpu")
return (out,)
class InvertMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"mask": ("MASK",),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "invert"
def invert(self, mask):
out = 1.0 - mask
return (out,)
class CropMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"mask": ("MASK",),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
"height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "crop"
def crop(self, mask, x, y, width, height):
out = mask[y:y + height, x:x + width]
return (out,)
class MaskComposite:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"destination": ("MASK",),
"source": ("MASK",),
"x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"operation": (["multiply", "add", "subtract"],),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "combine"
def combine(self, destination, source, x, y, operation):
output = destination.clone()
left, top = (x, y,)
right, bottom = (min(left + source.shape[1], destination.shape[1]), min(top + source.shape[0], destination.shape[0]))
visible_width, visible_height = (right - left, bottom - top,)
source_portion = source[:visible_height, :visible_width]
destination_portion = destination[top:bottom, left:right]
if operation == "multiply":
output[top:bottom, left:right] = destination_portion * source_portion
elif operation == "add":
output[top:bottom, left:right] = destination_portion + source_portion
elif operation == "subtract":
output[top:bottom, left:right] = destination_portion - source_portion
output = torch.clamp(output, 0.0, 1.0)
return (output,)
class FeatherMask:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"mask": ("MASK",),
"left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
"bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}),
}
}
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "feather"
def feather(self, mask, left, top, right, bottom):
output = mask.clone()
left = min(left, output.shape[1])
right = min(right, output.shape[1])
top = min(top, output.shape[0])
bottom = min(bottom, output.shape[0])
for x in range(left):
feather_rate = (x + 1.0) / left
output[:, x] *= feather_rate
for x in range(right):
feather_rate = (x + 1) / right
output[:, -x] *= feather_rate
for y in range(top):
feather_rate = (y + 1) / top
output[y, :] *= feather_rate
for y in range(bottom):
feather_rate = (y + 1) / bottom
output[-y, :] *= feather_rate
return (output,)
NODE_CLASS_MAPPINGS = {
"LatentCompositeMasked": LatentCompositeMasked,
"MaskToImage": MaskToImage,
"ImageToMask": ImageToMask,
"SolidMask": SolidMask,
"InvertMask": InvertMask,
"CropMask": CropMask,
"MaskComposite": MaskComposite,
"FeatherMask": FeatherMask,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"ImageToMask": "Convert Image to Mask",
"MaskToImage": "Convert Mask to Image",
}


@ -1,6 +1,6 @@
import os
from comfy_extras.chainner_models import model_loading
import model_management
from comfy import model_management
import torch
import comfy.utils
import folder_paths


@ -10,6 +10,8 @@ import gc
import torch
import nodes
import comfy.model_management
def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
valid_inputs = class_def.INPUT_TYPES()
input_data_all = {}
@ -202,10 +204,7 @@ class PromptExecutor:
self.server.send_sync("executing", { "node": None }, self.server.client_id)
gc.collect()
if torch.cuda.is_available():
if torch.version.cuda: #This seems to make things worse on ROCm so I only do it for cuda
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
comfy.model_management.soft_empty_cache()
def validate_inputs(prompt, item):


@ -18,6 +18,6 @@ a111:
#other_ui:
# base_path: path/to/ui
# checkpoints: models/checkpoints
# custom_nodes: path/custom_nodes


@ -12,8 +12,8 @@ except:
folder_names_and_paths = {}
models_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "models")
base_path = os.path.dirname(os.path.realpath(__file__))
models_dir = os.path.join(base_path, "models")
folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_ckpt_extensions)
folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
folder_names_and_paths["output"] = ([os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")], [".png"])
@ -29,6 +29,9 @@ folder_names_and_paths["diffusers"] = ([os.path.join(models_dir, "diffusers")],
folder_names_and_paths["controlnet"] = ([os.path.join(models_dir, "controlnet"), os.path.join(models_dir, "t2i_adapter")], supported_pt_extensions)
folder_names_and_paths["upscale_models"] = ([os.path.join(models_dir, "upscale_models")], supported_pt_extensions)
folder_names_and_paths["custom_nodes"] = ([os.path.join(base_path, "custom_nodes")], [])
output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")

main.py

@ -81,6 +81,14 @@ if __name__ == "__main__":
server = server.PromptServer(loop)
q = execution.PromptQueue(server)
extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
if os.path.isfile(extra_model_paths_config_path):
load_extra_path_config(extra_model_paths_config_path)
if args.extra_model_paths_config:
for config_path in itertools.chain(*args.extra_model_paths_config):
load_extra_path_config(config_path)
init_custom_nodes()
server.add_routes()
hijack_progress(server)
@ -91,13 +99,6 @@ if __name__ == "__main__":
dont_print = args.dont_print_server
extra_model_paths_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "extra_model_paths.yaml")
if os.path.isfile(extra_model_paths_config_path):
load_extra_path_config(extra_model_paths_config_path)
if args.extra_model_paths_config:
for config_path in itertools.chain(*args.extra_model_paths_config):
load_extra_path_config(config_path)
if args.output_directory:
output_dir = os.path.abspath(args.output_directory)


@ -22,16 +22,16 @@ import comfy.utils
import comfy.clip_vision
import model_management
import comfy.model_management
import importlib
import folder_paths
def before_node_execution():
model_management.throw_exception_if_processing_interrupted()
comfy.model_management.throw_exception_if_processing_interrupted()
def interrupt_processing(value=True):
model_management.interrupt_current_processing(value)
comfy.model_management.interrupt_current_processing(value)
MAX_RESOLUTION=8192
@ -242,7 +242,7 @@ class DiffusersLoader:
model_path = os.path.join(search_path, model_path)
break
return comfy.diffusers_convert.load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
class unCLIPCheckpointLoader:
@ -511,6 +511,24 @@ class EmptyLatentImage:
return ({"samples":latent}, )
class LatentFromBatch:
@classmethod
def INPUT_TYPES(s):
return {"required": { "samples": ("LATENT",),
"batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "rotate"
CATEGORY = "latent"
def rotate(self, samples, batch_index):
s = samples.copy()
s_in = samples["samples"]
batch_index = min(s_in.shape[0] - 1, batch_index)
s["samples"] = s_in[batch_index:batch_index + 1].clone()
s["batch_index"] = batch_index
return (s,)
class LatentUpscale:
upscale_methods = ["nearest-exact", "bilinear", "area"]
@ -681,12 +699,19 @@ class SetLatentNoiseMask:
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
latent_image = latent["samples"]
noise_mask = None
device = model_management.get_torch_device()
device = comfy.model_management.get_torch_device()
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=torch.manual_seed(seed), device="cpu")
batch_index = 0
if "batch_index" in latent:
batch_index = latent["batch_index"]
generator = torch.manual_seed(seed)
for i in range(batch_index):
noise = torch.randn([1] + list(latent_image.size())[1:], dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
noise = torch.randn(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, generator=generator, device="cpu")
if "noise_mask" in latent:
noise_mask = latent['noise_mask']
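The `batch_index` handling above advances a freshly seeded generator by `batch_index` single-sample draws before drawing the real noise, so a latent picked out with `LatentFromBatch` gets the same noise it would have received inside the full batch. A sketch of that intent (it assumes noise is filled sample by sample, as with CPU generation used here):
```python
import torch

# Sketch of the intent behind the batch_index loop above; assumes the generator
# fills noise sample by sample (CPU generation, as used in common_ksampler).
seed, shape, i = 42, (4, 4, 64, 64), 2                  # batch of 4 latents, index 2

full = torch.randn(shape, generator=torch.manual_seed(seed), device="cpu")

gen = torch.manual_seed(seed)
for _ in range(i):
    torch.randn((1,) + shape[1:], generator=gen, device="cpu")  # skip earlier samples
single = torch.randn((1,) + shape[1:], generator=gen, device="cpu")

print(torch.allclose(full[i:i + 1], single))            # expected: True
```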
@ -697,7 +722,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
noise_mask = noise_mask.to(device)
real_model = None
model_management.load_model_gpu(model)
comfy.model_management.load_model_gpu(model)
real_model = model.model
noise = noise.to(device)
@ -727,7 +752,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
control_net_models = []
for x in control_nets:
control_net_models += x.get_control_models()
model_management.load_controlnet_gpu(control_net_models)
comfy.model_management.load_controlnet_gpu(control_net_models)
if sampler_name in comfy.samplers.KSampler.SAMPLERS:
sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
@ -873,7 +898,7 @@ class SaveImage:
"filename": file,
"subfolder": subfolder,
"type": self.type
});
})
counter += 1
return { "ui": { "images": results } }
@ -934,7 +959,7 @@ class LoadImageMask:
"channel": (["alpha", "red", "green", "blue"], ),}
}
CATEGORY = "image"
CATEGORY = "mask"
RETURN_TYPES = ("MASK",)
FUNCTION = "load_image"
@ -942,6 +967,8 @@ class LoadImageMask:
input_dir = folder_paths.get_input_directory()
image_path = os.path.join(input_dir, image)
i = Image.open(image_path)
if i.getbands() != ("R", "G", "B", "A"):
i = i.convert("RGBA")
mask = None
c = channel[0].upper()
if c in i.getbands():
@ -1073,6 +1100,7 @@ NODE_CLASS_MAPPINGS = {
"VAELoader": VAELoader,
"EmptyLatentImage": EmptyLatentImage,
"LatentUpscale": LatentUpscale,
"LatentFromBatch": LatentFromBatch,
"SaveImage": SaveImage,
"PreviewImage": PreviewImage,
"LoadImage": LoadImage,
@ -1178,18 +1206,20 @@ def load_custom_node(module_path):
print(f"Cannot import {module_path} module for custom nodes:", e)
def load_custom_nodes():
CUSTOM_NODE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_nodes")
possible_modules = os.listdir(CUSTOM_NODE_PATH)
if "__pycache__" in possible_modules:
possible_modules.remove("__pycache__")
node_paths = folder_paths.get_folder_paths("custom_nodes")
for custom_node_path in node_paths:
possible_modules = os.listdir(custom_node_path)
if "__pycache__" in possible_modules:
possible_modules.remove("__pycache__")
for possible_module in possible_modules:
module_path = os.path.join(CUSTOM_NODE_PATH, possible_module)
if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
load_custom_node(module_path)
for possible_module in possible_modules:
module_path = os.path.join(custom_node_path, possible_module)
if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
load_custom_node(module_path)
def init_custom_nodes():
load_custom_nodes()
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py"))
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py"))
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "silver_custom.py"))
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py"))
load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "silver_custom.py"))


@ -119,9 +119,20 @@
"\n",
"\n",
"# ControlNet\n",
"#!wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_depth-fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_scribble-fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_openpose-fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_canny_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_lineart_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_openpose_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_scribble_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_seg_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_softedge_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors -P ./models/controlnet/\n",
"#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors -P ./models/controlnet/\n",
"\n",
"\n",
"# Controlnet Preprocessor nodes by Fannovel16\n",


@ -5,9 +5,9 @@ import { api } from "/scripts/api.js";
// Manage color palettes
const colorPalettes = {
"palette_1": {
"id": "palette_1",
"name": "Palette 1",
"dark": {
"id": "dark",
"name": "Dark (Default)",
"colors": {
"node_slot": {
"CLIP": "#FFD500", // bright yellow
@ -45,6 +45,70 @@ const colorPalettes = {
"EVENT_LINK_COLOR": "#A86",
"CONNECTING_LINK_COLOR": "#AFA",
},
"comfy_base": {
"fg-color": "#fff",
"bg-color": "#202020",
"comfy-menu-bg": "#353535",
"comfy-input-bg": "#222",
"input-text": "#ddd",
"descrip-text": "#999",
"drag-text": "#ccc",
"error-text": "#ff4444",
"border-color": "#4e4e4e"
}
},
},
"light": {
"id": "light",
"name": "Light",
"colors": {
"node_slot": {
"CLIP": "#FFA726", // orange
"CLIP_VISION": "#5C6BC0", // indigo
"CLIP_VISION_OUTPUT": "#8D6E63", // brown
"CONDITIONING": "#EF5350", // red
"CONTROL_NET": "#66BB6A", // green
"IMAGE": "#42A5F5", // blue
"LATENT": "#AB47BC", // purple
"MASK": "#9CCC65", // light green
"MODEL": "#7E57C2", // deep purple
"STYLE_MODEL": "#D4E157", // lime
"VAE": "#FF7043", // deep orange
},
"litegraph_base": {
"NODE_TITLE_COLOR": "#222",
"NODE_SELECTED_TITLE_COLOR": "#000",
"NODE_TEXT_SIZE": 14,
"NODE_TEXT_COLOR": "#444",
"NODE_SUBTEXT_SIZE": 12,
"NODE_DEFAULT_COLOR": "#F7F7F7",
"NODE_DEFAULT_BGCOLOR": "#F5F5F5",
"NODE_DEFAULT_BOXCOLOR": "#CCC",
"NODE_DEFAULT_SHAPE": "box",
"NODE_BOX_OUTLINE_COLOR": "#000",
"DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.1)",
"DEFAULT_GROUP_FONT": 24,
"WIDGET_BGCOLOR": "#D4D4D4",
"WIDGET_OUTLINE_COLOR": "#999",
"WIDGET_TEXT_COLOR": "#222",
"WIDGET_SECONDARY_TEXT_COLOR": "#555",
"LINK_COLOR": "#4CAF50",
"EVENT_LINK_COLOR": "#FF9800",
"CONNECTING_LINK_COLOR": "#2196F3",
},
"comfy_base": {
"fg-color": "#222",
"bg-color": "#DDD",
"comfy-menu-bg": "#F5F5F5",
"comfy-input-bg": "#C9C9C9",
"input-text": "#222",
"descrip-text": "#444",
"drag-text": "#555",
"error-text": "#F44336",
"border-color": "#888"
}
},
},
"solarized": {
@ -52,49 +116,60 @@ const colorPalettes = {
"name": "Solarized",
"colors": {
"node_slot": {
"CLIP": "#859900", // Green
"CLIP_VISION": "#6c71c4", // Indigo
"CLIP_VISION_OUTPUT": "#859900", // Green
"CONDITIONING": "#d33682", // Magenta
"CONTROL_NET": "#cb4b16", // Orange
"IMAGE": "#dc322f", // Red
"LATENT": "#268bd2", // Blue
"MASK": "#073642", // Base02
"MODEL": "#cb4b16", // Orange
"STYLE_MODEL": "#073642", // Base02
"UPSCALE_MODEL": "#6c71c4", // Indigo
"VAE": "#586e75", // Base1
"CLIP": "#2AB7CA", // light blue
"CLIP_VISION": "#6c71c4", // blue violet
"CLIP_VISION_OUTPUT": "#859900", // olive green
"CONDITIONING": "#d33682", // magenta
"CONTROL_NET": "#d1ffd7", // light mint green
"IMAGE": "#5940bb", // deep blue violet
"LATENT": "#268bd2", // blue
"MASK": "#CCC9E7", // light purple-gray
"MODEL": "#dc322f", // red
"STYLE_MODEL": "#1a998a", // teal
"UPSCALE_MODEL": "#054A29", // dark green
"VAE": "#facfad", // light pink-orange
},
"litegraph_base": {
"NODE_TITLE_COLOR": "#fdf6e3",
"NODE_SELECTED_TITLE_COLOR": "#b58900",
"NODE_TITLE_COLOR": "#fdf6e3", // Base3
"NODE_SELECTED_TITLE_COLOR": "#A9D400",
"NODE_TEXT_SIZE": 14,
"NODE_TEXT_COLOR": "#657b83",
"NODE_TEXT_COLOR": "#657b83", // Base00
"NODE_SUBTEXT_SIZE": 12,
"NODE_DEFAULT_COLOR": "#586e75",
"NODE_DEFAULT_BGCOLOR": "#073642",
"NODE_DEFAULT_BOXCOLOR": "#839496",
"NODE_DEFAULT_COLOR": "#094656",
"NODE_DEFAULT_BGCOLOR": "#073642", // Base02
"NODE_DEFAULT_BOXCOLOR": "#839496", // Base0
"NODE_DEFAULT_SHAPE": "box",
"NODE_BOX_OUTLINE_COLOR": "#fdf6e3",
"NODE_BOX_OUTLINE_COLOR": "#fdf6e3", // Base3
"DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)",
"DEFAULT_GROUP_FONT": 24,
"WIDGET_BGCOLOR": "#002b36",
"WIDGET_OUTLINE_COLOR": "#839496",
"WIDGET_TEXT_COLOR": "#fdf6e3",
"WIDGET_SECONDARY_TEXT_COLOR": "#93a1a1",
"WIDGET_BGCOLOR": "#002b36", // Base03
"WIDGET_OUTLINE_COLOR": "#839496", // Base0
"WIDGET_TEXT_COLOR": "#fdf6e3", // Base3
"WIDGET_SECONDARY_TEXT_COLOR": "#93a1a1", // Base1
"LINK_COLOR": "#2aa198",
"EVENT_LINK_COLOR": "#268bd2",
"CONNECTING_LINK_COLOR": "#859900",
"LINK_COLOR": "#2aa198", // Solarized Cyan
"EVENT_LINK_COLOR": "#268bd2", // Solarized Blue
"CONNECTING_LINK_COLOR": "#859900", // Solarized Green
},
"comfy_base": {
"fg-color": "#fdf6e3", // Base3
"bg-color": "#002b36", // Base03
"comfy-menu-bg": "#073642", // Base02
"comfy-input-bg": "#002b36", // Base03
"input-text": "#93a1a1", // Base1
"descrip-text": "#586e75", // Base01
"drag-text": "#839496", // Base0
"error-text": "#dc322f", // Solarized Red
"border-color": "#657b83" // Base00
}
},
}
};
const id = "Comfy.ColorPalette";
const idCustomColorPalettes = "Comfy.CustomColorPalettes";
const defaultColorPaletteId = "palette_1";
const defaultColorPaletteId = "dark";
const els = {}
// const ctxMenu = LiteGraph.ContextMenu;
app.registerExtension({
@ -236,10 +311,12 @@ app.registerExtension({
const loadColorPalette = async (colorPalette) => {
colorPalette = await completeColorPalette(colorPalette);
if (colorPalette.colors) {
// Sets the colors of node slots and links
if (colorPalette.colors.node_slot) {
Object.assign(app.canvas.default_connection_color_byType, colorPalette.colors.node_slot);
Object.assign(LGraphCanvas.link_type_colors, colorPalette.colors.node_slot);
}
// Sets the colors of the LiteGraph objects
if (colorPalette.colors.litegraph_base) {
// Everything updates correctly in the loop, except the Node Title and Link Color for some reason
app.canvas.node_title_color = colorPalette.colors.litegraph_base.NODE_TITLE_COLOR;
@ -251,6 +328,13 @@ app.registerExtension({
}
}
}
// Sets the color of ComfyUI elements
if (colorPalette.colors.comfy_base) {
const rootStyle = document.documentElement.style;
for (const key in colorPalette.colors.comfy_base) {
rootStyle.setProperty('--' + key, colorPalette.colors.comfy_base[key]);
}
}
app.canvas.draw(true, true);
}
};


@ -0,0 +1,146 @@
import { app } from "/scripts/app.js";
// Allows you to edit the attention weight by holding ctrl (or cmd) and using the up/down arrow keys
app.registerExtension({
name: "Comfy.EditAttention",
init() {
const editAttentionDelta = app.ui.settings.addSetting({
id: "Comfy.EditAttention.Delta",
name: "Ctrl+up/down precision",
type: "slider",
attrs: {
min: 0.01,
max: 0.5,
step: 0.01,
},
defaultValue: 0.05,
});
function incrementWeight(weight, delta) {
const floatWeight = parseFloat(weight);
if (isNaN(floatWeight)) return weight;
const newWeight = floatWeight + delta;
if (newWeight < 0) return "0";
return String(Number(newWeight.toFixed(10)));
}
function findNearestEnclosure(text, cursorPos) {
let start = cursorPos, end = cursorPos;
let openCount = 0, closeCount = 0;
// Find opening parenthesis before cursor
while (start >= 0) {
start--;
if (text[start] === "(" && openCount === closeCount) break;
if (text[start] === "(") openCount++;
if (text[start] === ")") closeCount++;
}
if (start < 0) return false;
openCount = 0;
closeCount = 0;
// Find closing parenthesis after cursor
while (end < text.length) {
if (text[end] === ")" && openCount === closeCount) break;
if (text[end] === "(") openCount++;
if (text[end] === ")") closeCount++;
end++;
}
if (end === text.length) return false;
return { start: start + 1, end: end };
}
function addWeightToParentheses(text) {
const parenRegex = /^\((.*)\)$/;
const parenMatch = text.match(parenRegex);
const floatRegex = /:([+-]?(\d*\.)?\d+([eE][+-]?\d+)?)/;
const floatMatch = text.match(floatRegex);
if (parenMatch && !floatMatch) {
return `(${parenMatch[1]}:1.0)`;
} else {
return text;
}
};
function editAttention(event) {
const inputField = event.composedPath()[0];
const delta = parseFloat(editAttentionDelta.value);
if (inputField.tagName !== "TEXTAREA") return;
if (!(event.key === "ArrowUp" || event.key === "ArrowDown")) return;
if (!event.ctrlKey && !event.metaKey) return;
event.preventDefault();
let start = inputField.selectionStart;
let end = inputField.selectionEnd;
let selectedText = inputField.value.substring(start, end);
// If there is no selection, attempt to find the nearest enclosure, or select the current word
if (!selectedText) {
const nearestEnclosure = findNearestEnclosure(inputField.value, start);
if (nearestEnclosure) {
start = nearestEnclosure.start;
end = nearestEnclosure.end;
selectedText = inputField.value.substring(start, end);
} else {
// Select the current word, find the start and end of the word (first space before and after)
const wordStart = inputField.value.substring(0, start).lastIndexOf(" ") + 1;
const wordEnd = inputField.value.substring(end).indexOf(" ");
// If there is no space after the word, select to the end of the string
if (wordEnd === -1) {
end = inputField.value.length;
} else {
end += wordEnd;
}
start = wordStart;
// Remove all punctuation at the end and beginning of the word
while (inputField.value[start].match(/[.,\/#!$%\^&\*;:{}=\-_`~()]/)) {
start++;
}
while (inputField.value[end - 1].match(/[.,\/#!$%\^&\*;:{}=\-_`~()]/)) {
end--;
}
selectedText = inputField.value.substring(start, end);
if (!selectedText) return;
}
}
// If the selection ends with a space, remove it
if (selectedText[selectedText.length - 1] === " ") {
selectedText = selectedText.substring(0, selectedText.length - 1);
end -= 1;
}
// If there are parentheses left and right of the selection, select them
if (inputField.value[start - 1] === "(" && inputField.value[end] === ")") {
start -= 1;
end += 1;
selectedText = inputField.value.substring(start, end);
}
// If the selection is not enclosed in parentheses, add them
if (selectedText[0] !== "(" || selectedText[selectedText.length - 1] !== ")") {
selectedText = `(${selectedText})`;
}
// If the selection does not have a weight, add a weight of 1.0
selectedText = addWeightToParentheses(selectedText);
// Increment the weight
const weightDelta = event.key === "ArrowUp" ? delta : -delta;
const updatedText = selectedText.replace(/(.*:)(\d+(\.\d+)?)(.*)/, (match, prefix, weight, _, suffix) => {
return prefix + incrementWeight(weight, weightDelta) + suffix;
});
inputField.setRangeText(updatedText, start, end, "select");
}
window.addEventListener("keydown", editAttention);
},
});


@ -0,0 +1,76 @@
import { app } from "/scripts/app.js";
const id = "Comfy.Keybinds";
app.registerExtension({
name: id,
init() {
const keybindListener = function(event) {
const modifierPressed = event.ctrlKey || event.metaKey;
// Queue prompt using ctrl or command + enter
if (modifierPressed && (event.key === "Enter" || event.keyCode === 13 || event.keyCode === 10)) {
app.queuePrompt(event.shiftKey ? -1 : 0);
return;
}
const target = event.composedPath()[0];
if (target.tagName === "INPUT" || target.tagName === "TEXTAREA") {
return;
}
const modifierKeyIdMap = {
"s": "#comfy-save-button",
83: "#comfy-save-button",
"o": "#comfy-file-input",
79: "#comfy-file-input",
"Backspace": "#comfy-clear-button",
8: "#comfy-clear-button",
"Delete": "#comfy-clear-button",
46: "#comfy-clear-button",
"d": "#comfy-load-default-button",
68: "#comfy-load-default-button",
};
const modifierKeybindId = modifierKeyIdMap[event.key] || modifierKeyIdMap[event.keyCode];
if (modifierPressed && modifierKeybindId) {
event.preventDefault();
const elem = document.querySelector(modifierKeybindId);
elem.click();
return;
}
// Finished handling all modifier keybinds, now handle the rest
if (event.ctrlKey || event.altKey || event.metaKey) {
return;
}
// Close out of modals using escape
if (event.key === "Escape" || event.keyCode === 27) {
const modals = document.querySelectorAll(".comfy-modal");
const modal = Array.from(modals).find(modal => window.getComputedStyle(modal).getPropertyValue("display") !== "none");
if (modal) {
modal.style.display = "none";
}
}
const keyIdMap = {
"q": "#comfy-view-queue-button",
81: "#comfy-view-queue-button",
"h": "#comfy-view-history-button",
72: "#comfy-view-history-button",
"r": "#comfy-refresh-button",
82: "#comfy-refresh-button",
};
const buttonId = keyIdMap[event.key] || keyIdMap[event.keyCode];
if (buttonId) {
const button = document.querySelector(buttonId);
button.click();
}
}
window.addEventListener("keydown", keybindListener, true);
}
});


@ -0,0 +1,41 @@
import {app} from "../../scripts/app.js";
import {ComfyWidgets} from "../../scripts/widgets.js";
// Node that adds notes to your project
app.registerExtension({
name: "Comfy.NoteNode",
registerCustomNodes() {
class NoteNode {
color=LGraphCanvas.node_colors.yellow.color;
bgcolor=LGraphCanvas.node_colors.yellow.bgcolor;
groupcolor = LGraphCanvas.node_colors.yellow.groupcolor;
constructor() {
if (!this.properties) {
this.properties = {};
this.properties.text="";
}
ComfyWidgets.STRING(this, "", ["", {default:this.properties.text, multiline: true}], app)
this.serialize_widgets = true;
this.isVirtualNode = true;
}
}
// Load default visibility
LiteGraph.registerNodeType(
"Note",
Object.assign(NoteNode, {
title_mode: LiteGraph.NORMAL_TITLE,
title: "Note",
collapsable: true,
})
);
NoteNode.category = "utils";
},
});


@ -90,7 +90,7 @@ app.registerExtension({
const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
if (!this.properties || !("Node name for S&R" in this.properties)) {
this.addProperty("Node name for S&R", this.title, "string");
this.addProperty("Node name for S&R", this.constructor.type, "string");
}
return r;


@ -9,7 +9,7 @@ app.registerExtension({
app.ui.settings.addSetting({
id: "Comfy.SnapToGrid.GridSize",
name: "Grid Size",
type: "number",
type: "slider",
attrs: {
min: 1,
max: 500,


@ -1,4 +1,4 @@
import { ComfyWidgets, addRandomizeWidget } from "/scripts/widgets.js";
import { ComfyWidgets, addValueControlWidget } from "/scripts/widgets.js";
import { app } from "/scripts/app.js";
const CONVERTED_TYPE = "converted-widget";
@ -23,7 +23,7 @@ function hideWidget(node, widget, suffix = "") {
return widget.origSerializeValue ? widget.origSerializeValue() : widget.value;
};
// Hide any linked widgets, e.g. seed+randomize
// Hide any linked widgets, e.g. seed+seedControl
if (widget.linkedWidgets) {
for (const w of widget.linkedWidgets) {
hideWidget(node, w, ":" + widget.name);
@ -40,7 +40,7 @@ function showWidget(widget) {
delete widget.origComputeSize;
delete widget.origSerializeValue;
// Hide any linked widgets, e.g. seed+randomize
// Hide any linked widgets, e.g. seed+seedControl
if (widget.linkedWidgets) {
for (const w of widget.linkedWidgets) {
showWidget(w);
@ -159,27 +159,33 @@ app.registerExtension({
const r = origOnInputDblClick ? origOnInputDblClick.apply(this, arguments) : undefined;
const input = this.inputs[slot];
if (input.widget && !input[ignoreDblClick]) {
const node = LiteGraph.createNode("PrimitiveNode");
app.graph.add(node);
// Calculate a position that wont directly overlap another node
const pos = [this.pos[0] - node.size[0] - 30, this.pos[1]];
while (isNodeAtPos(pos)) {
pos[1] += LiteGraph.NODE_TITLE_HEIGHT;
if (!input.widget || !input[ignoreDblClick]) {
// Not a widget input or already handled input
if (!(input.type in ComfyWidgets) && !(input.widget.config?.[0] instanceof Array)) {
return r; //also Not a ComfyWidgets input or combo (do nothing)
}
node.pos = pos;
node.connect(0, this, slot);
node.title = input.name;
// Prevent adding duplicates due to triple clicking
input[ignoreDblClick] = true;
setTimeout(() => {
delete input[ignoreDblClick];
}, 300);
}
// Create a primitive node
const node = LiteGraph.createNode("PrimitiveNode");
app.graph.add(node);
// Calculate a position that wont directly overlap another node
const pos = [this.pos[0] - node.size[0] - 30, this.pos[1]];
while (isNodeAtPos(pos)) {
pos[1] += LiteGraph.NODE_TITLE_HEIGHT;
}
node.pos = pos;
node.connect(0, this, slot);
node.title = input.name;
// Prevent adding duplicates due to triple clicking
input[ignoreDblClick] = true;
setTimeout(() => {
delete input[ignoreDblClick];
}, 300);
return r;
};
},
@ -233,7 +239,9 @@ app.registerExtension({
// Fires before the link is made allowing us to reject it if it isn't valid
// No widget, we can't connect
if (!input.widget) return false;
if (!input.widget) {
if (!(input.type in ComfyWidgets)) return false;
}
if (this.outputs[slot].links?.length) {
return this.#isValidConnection(input);
@ -252,9 +260,17 @@ app.registerExtension({
const input = theirNode.inputs[link.target_slot];
if (!input) return;
const widget = input.widget;
const { type, linkType } = getWidgetType(widget.config);
var _widget;
if (!input.widget) {
if (!(input.type in ComfyWidgets)) return;
_widget = { "name": input.name, "config": [input.type, {}] }//fake widget
} else {
_widget = input.widget;
}
const widget = _widget;
const { type, linkType } = getWidgetType(widget.config);
// Update our output to restrict to the widget type
this.outputs[0].type = linkType;
this.outputs[0].name = type;
@ -274,7 +290,7 @@ app.registerExtension({
if (type in ComfyWidgets) {
widget = (ComfyWidgets[type](this, "value", inputData, app) || {}).widget;
} else {
widget = this.addWidget(type, "value", null, () => {}, {});
widget = this.addWidget(type, "value", null, () => { }, {});
}
if (node?.widgets && widget) {
@ -285,7 +301,7 @@ app.registerExtension({
}
if (widget.type === "number") {
addRandomizeWidget(this, widget, "Random after every gen");
addValueControlWidget(this, widget, "fixed");
}
// When our value changes, update other widgets to reflect our changes
@ -319,7 +335,20 @@ app.registerExtension({
const config1 = this.outputs[0].widget.config;
const config2 = input.widget.config;
if (config1[0] !== config2[0]) return false;
if (config1[0] instanceof Array) {
// These checks shouldn't actually be necessary as the types should match
// but double checking doesn't hurt
// New input isn't a combo
if (!(config2[0] instanceof Array)) return false;
// New input combo has a different size
if (config1[0].length !== config2[0].length) return false;
// New input combo has different elements
if (config1[0].find((v, i) => config2[0][i] !== v)) return false;
} else if (config1[0] !== config2[0]) {
// Configs don't match
return false;
}
for (const k in config1[1]) {
if (k !== "default") {


@ -142,6 +142,8 @@
pointerevents_method: "pointer", // "mouse"|"pointer" use mouse for retrocompatibility issues? (none found @ now)
// TODO implement pointercancel, gotpointercapture, lostpointercapture, (pointerover, pointerout if necessary)
ctrl_shift_v_paste_connect_unselected_outputs: true, //[true!] allows ctrl + shift + v to paste nodes with the outputs of the unselected nodes connected with the inputs of the newly pasted nodes
/**
* Register a node class so it can be listed when the user wants to create a new one
* @method registerNodeType
@ -253,13 +255,18 @@
* @param {String|Object} type name of the node or the node constructor itself
*/
unregisterNodeType: function(type) {
var base_class = type.constructor === String ? this.registered_node_types[type] : type;
if(!base_class)
throw("node type not found: " + type );
delete this.registered_node_types[base_class.type];
if(base_class.constructor.name)
delete this.Nodes[base_class.constructor.name];
},
const base_class =
type.constructor === String
? this.registered_node_types[type]
: type;
if (!base_class) {
throw "node type not found: " + type;
}
delete this.registered_node_types[base_class.type];
if (base_class.constructor.name) {
delete this.Nodes[base_class.constructor.name];
}
},
/**
* Save a slot type and his node
@ -267,38 +274,49 @@
* @param {String|Object} type name of the node or the node constructor itself
* @param {String} slot_type name of the slot type (variable type), eg. string, number, array, boolean, ..
*/
registerNodeAndSlotType: function(type,slot_type,out){
registerNodeAndSlotType: function(type, slot_type, out){
out = out || false;
var base_class = type.constructor === String && this.registered_node_types[type] !== "anonymous" ? this.registered_node_types[type] : type;
var sCN = base_class.constructor.type;
if (typeof slot_type == "string"){
var aTypes = slot_type.split(",");
}else if (slot_type == this.EVENT || slot_type == this.ACTION){
var aTypes = ["_event_"];
}else{
var aTypes = ["*"];
const base_class =
type.constructor === String &&
this.registered_node_types[type] !== "anonymous"
? this.registered_node_types[type]
: type;
const class_type = base_class.constructor.type;
let allTypes = [];
if (typeof slot_type === "string") {
allTypes = slot_type.split(",");
} else if (slot_type == this.EVENT || slot_type == this.ACTION) {
allTypes = ["_event_"];
} else {
allTypes = ["*"];
}
for (var i = 0; i < aTypes.length; ++i) {
var sT = aTypes[i]; //.toLowerCase();
if (sT === ""){
sT = "*";
for (let i = 0; i < allTypes.length; ++i) {
let slotType = allTypes[i];
if (slotType === "") {
slotType = "*";
}
var registerTo = out ? "registered_slot_out_types" : "registered_slot_in_types";
if (typeof this[registerTo][sT] == "undefined") this[registerTo][sT] = {nodes: []};
this[registerTo][sT].nodes.push(sCN);
const registerTo = out
? "registered_slot_out_types"
: "registered_slot_in_types";
if (this[registerTo][slotType] === undefined) {
this[registerTo][slotType] = { nodes: [] };
}
if (!this[registerTo][slotType].nodes.includes(class_type)) {
this[registerTo][slotType].nodes.push(class_type);
}
// check if is a new type
if (!out){
if (!this.slot_types_in.includes(sT.toLowerCase())){
this.slot_types_in.push(sT.toLowerCase());
if (!out) {
if (!this.slot_types_in.includes(slotType.toLowerCase())) {
this.slot_types_in.push(slotType.toLowerCase());
this.slot_types_in.sort();
}
}else{
if (!this.slot_types_out.includes(sT.toLowerCase())){
this.slot_types_out.push(sT.toLowerCase());
} else {
if (!this.slot_types_out.includes(slotType.toLowerCase())) {
this.slot_types_out.push(slotType.toLowerCase());
this.slot_types_out.sort();
}
}
@ -1616,7 +1634,8 @@
var nRet = null;
for (var i = nodes_list.length - 1; i >= 0; i--) {
var n = nodes_list[i];
if (n.isPointInside(x, y, margin)) {
var skip_title = n.constructor.title_mode == LiteGraph.NO_TITLE;
if (n.isPointInside(x, y, margin, skip_title)) {
// check for lesser interest nodes (TODO check for overlapping, use the top)
/*if (typeof n == "LGraphGroup"){
nRet = n;
@ -3967,8 +3986,8 @@
var aSource = (type+"").toLowerCase().split(",");
var aDest = aSlots[i].type=="0"||aSlots[i].type=="*"?"0":aSlots[i].type;
aDest = (aDest+"").toLowerCase().split(",");
for(sI=0;sI<aSource.length;sI++){
for(dI=0;dI<aDest.length;dI++){
for(var sI=0;sI<aSource.length;sI++){
for(var dI=0;dI<aDest.length;dI++){
if (aSource[sI]=="_event_") aSource[sI] = LiteGraph.EVENT;
if (aDest[sI]=="_event_") aDest[sI] = LiteGraph.EVENT;
if (aSource[sI]=="*") aSource[sI] = 0;
@ -3987,8 +4006,8 @@
var aSource = (type+"").toLowerCase().split(",");
var aDest = aSlots[i].type=="0"||aSlots[i].type=="*"?"0":aSlots[i].type;
aDest = (aDest+"").toLowerCase().split(",");
for(sI=0;sI<aSource.length;sI++){
for(dI=0;dI<aDest.length;dI++){
for(var sI=0;sI<aSource.length;sI++){
for(var dI=0;dI<aDest.length;dI++){
if (aSource[sI]=="*") aSource[sI] = 0;
if (aDest[sI]=="*") aDest[sI] = 0;
if (aSource[sI] == aDest[dI]) {
@ -4019,7 +4038,7 @@
if (target_node && target_node.constructor === Number) {
target_node = this.graph.getNodeById(target_node);
}
target_slot = target_node.findInputSlotByType(target_slotType, false, true);
var target_slot = target_node.findInputSlotByType(target_slotType, false, true);
if (target_slot >= 0 && target_slot !== null){
//console.debug("CONNbyTYPE type "+target_slotType+" for "+target_slot)
return this.connect(slot, target_node, target_slot);
@ -4072,7 +4091,7 @@
if (source_node && source_node.constructor === Number) {
source_node = this.graph.getNodeById(source_node);
}
source_slot = source_node.findOutputSlotByType(source_slotType, false, true);
var source_slot = source_node.findOutputSlotByType(source_slotType, false, true);
if (source_slot >= 0 && source_slot !== null){
//console.debug("CONNbyTYPE OUT! type "+source_slotType+" for "+source_slot)
return source_node.connect(source_slot, this, slot);
@ -5184,6 +5203,7 @@ LGraphNode.prototype.executeAction = function(action)
this.editor_alpha = 1; //used for transition
this.pause_rendering = false;
this.clear_background = true;
this.clear_background_color = "#222";
this.read_only = false; //if set to true users cannot modify the graph
this.render_only_selected = true;
@ -6986,7 +7006,7 @@ LGraphNode.prototype.executeAction = function(action)
block_default = true;
}
if (e.code == "KeyC" && (e.metaKey || e.ctrlKey) && !e.shiftKey) {
if ((e.keyCode === 67) && (e.metaKey || e.ctrlKey) && !e.shiftKey) {
//copy
if (this.selected_nodes) {
this.copyToClipboard();
@ -6994,9 +7014,9 @@ LGraphNode.prototype.executeAction = function(action)
}
}
if (e.code == "KeyV" && (e.metaKey || e.ctrlKey) && !e.shiftKey) {
if ((e.keyCode === 86) && (e.metaKey || e.ctrlKey)) {
//paste
this.pasteFromClipboard();
this.pasteFromClipboard(e.shiftKey);
}
//delete or backspace
@ -7081,15 +7101,15 @@ LGraphNode.prototype.executeAction = function(action)
var target_node = this.graph.getNodeById(
link_info.origin_id
);
if (!target_node || !this.selected_nodes[target_node.id]) {
//improve this by allowing connections to non-selected nodes
if (!target_node) {
continue;
} //not selected
}
clipboard_info.links.push([
target_node._relative_id,
link_info.origin_slot, //j,
node._relative_id,
link_info.target_slot
link_info.target_slot,
target_node.id
]);
}
}
@ -7100,7 +7120,11 @@ LGraphNode.prototype.executeAction = function(action)
);
};
LGraphCanvas.prototype.pasteFromClipboard = function() {
LGraphCanvas.prototype.pasteFromClipboard = function(isConnectUnselected = false) {
// if the ctrl + shift + v option is disabled, bail out when isConnectUnselected is true (shift was held) so the old paste behavior is preserved
if (!LiteGraph.ctrl_shift_v_paste_connect_unselected_outputs && isConnectUnselected) {
return;
}
var data = localStorage.getItem("litegrapheditor_clipboard");
if (!data) {
return;
@ -7149,7 +7173,16 @@ LGraphNode.prototype.executeAction = function(action)
//create links
for (var i = 0; i < clipboard_info.links.length; ++i) {
var link_info = clipboard_info.links[i];
var origin_node = nodes[link_info[0]];
var origin_node;
var origin_node_relative_id = link_info[0];
if (origin_node_relative_id != null) {
origin_node = nodes[origin_node_relative_id];
} else if (LiteGraph.ctrl_shift_v_paste_connect_unselected_outputs && isConnectUnselected) {
var origin_node_id = link_info[4];
if (origin_node_id) {
origin_node = this.graph.getNodeById(origin_node_id);
}
}
var target_node = nodes[link_info[2]];
if( origin_node && target_node )
origin_node.connect(link_info[1], target_node, link_info[3]);
@ -8212,6 +8245,17 @@ LGraphNode.prototype.executeAction = function(action)
this.ds.toCanvasContext(ctx);
//render BG
if ( this.ds.scale < 1.5 && !bg_already_painted && this.clear_background_color )
{
ctx.fillStyle = this.clear_background_color;
ctx.fillRect(
this.visible_area[0],
this.visible_area[1],
this.visible_area[2],
this.visible_area[3]
);
}
if (
this.background_image &&
this.ds.scale > 0.5 &&
@ -12274,7 +12318,7 @@ LGraphNode.prototype.executeAction = function(action)
var aProps = LiteGraph.availableCanvasOptions;
aProps.sort();
for(pI in aProps){
for(var pI in aProps){
var pX = aProps[pI];
panel.addWidget( "boolean", pX, graphcanvas[pX], {key: pX, on: "True", off: "False"}, fUpdate);
}

View File

@ -4,27 +4,48 @@ import { api } from "./api.js";
import { defaultGraph } from "./defaultGraph.js";
import { getPngMetadata, importA1111 } from "./pnginfo.js";
class ComfyApp {
/**
* List of {number, batchCount} entries to queue
/**
* @typedef {import("types/comfy").ComfyExtension} ComfyExtension
*/
export class ComfyApp {
/**
* List of entries to queue
* @type {{number: number, batchCount: number}[]}
*/
#queueItems = [];
/**
* If the queue is currently being processed
* @type {boolean}
*/
#processingQueue = false;
constructor() {
this.ui = new ComfyUI(this);
/**
* List of extensions that are registered with the app
* @type {ComfyExtension[]}
*/
this.extensions = [];
/**
* Stores the execution output data for each node
* @type {Record<string, any>}
*/
this.nodeOutputs = {};
/**
* If the shift key on the keyboard is pressed
* @type {boolean}
*/
this.shiftDown = false;
}
/**
* Invoke an extension callback
* @param {string} method The extension callback to execute
* @param {...any} args Any arguments to pass to the callback
* @param {keyof ComfyExtension} method The extension callback to execute
* @param {any[]} args Any arguments to pass to the callback
* @returns
*/
#invokeExtensions(method, ...args) {
@ -362,8 +383,20 @@ class ComfyApp {
if (n && n.onDragDrop && (await n.onDragDrop(event))) {
return;
}
// Dragging from Chrome to Firefox yields a file, but it's a bmp, so ignore that
if (event.dataTransfer.files.length && event.dataTransfer.files[0].type !== "image/bmp") {
await this.handleFile(event.dataTransfer.files[0]);
} else {
// Try loading the first URI in the transfer list
const validTypes = ["text/uri-list", "text/x-moz-url"];
const match = [...event.dataTransfer.types].find((t) => validTypes.find(v => t === v));
if (match) {
const uri = event.dataTransfer.getData(match)?.split("\n")?.[0];
if (uri) {
await this.handleFile(await (await fetch(uri)).blob());
}
}
}
});
// Always clear over node on drag leave
@ -679,11 +712,6 @@ class ComfyApp {
#addKeyboardHandler() {
window.addEventListener("keydown", (e) => {
this.shiftDown = e.shiftKey;
// Queue prompt using ctrl or command + enter
if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) {
this.queuePrompt(e.shiftKey ? -1 : 0);
}
});
window.addEventListener("keyup", (e) => {
this.shiftDown = e.shiftKey;
@ -938,6 +966,15 @@ class ComfyApp {
}
}
}
if (node.type == "KSampler" || node.type == "KSamplerAdvanced" || node.type == "PrimitiveNode") {
if (widget.name == "control_after_generate") {
if (widget.value === true) {
widget.value = "randomize";
} else if (widget.value === false) {
widget.value = "fixed";
}
}
}
}
}
@ -1090,7 +1127,7 @@ class ComfyApp {
importA1111(this.graph, pngInfo.parameters);
}
}
} else if (file.type === "application/json" || file.name.endsWith(".json")) {
} else if (file.type === "application/json" || file.name?.endsWith(".json")) {
const reader = new FileReader();
reader.onload = () => {
this.loadGraphData(JSON.parse(reader.result));
@ -1099,6 +1136,10 @@ class ComfyApp {
}
}
/**
* Registers a Comfy web extension with the app
* @param {ComfyExtension} extension
*/
registerExtension(extension) {
if (!extension.name) {
throw new Error("Extensions must have a 'name' property.");

View File

@ -131,6 +131,7 @@ export async function importA1111(graph, parameters) {
}
function replaceEmbeddings(text) {
if(!embeddings.length) return text;
return text.replaceAll(
new RegExp(
"\\b(" + embeddings.map((e) => e.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")).join("\\b|\\b") + ")\\b",

View File

@ -270,6 +270,30 @@ class ComfySettingsDialog extends ComfyDialog {
]),
]);
break;
case "slider":
element = $el("div", [
$el("label", { textContent: name }, [
$el("input", {
type: "range",
value,
oninput: (e) => {
setter(e.target.value);
e.target.nextElementSibling.value = e.target.value;
},
...attrs
}),
$el("input", {
type: "number",
value,
oninput: (e) => {
setter(e.target.value);
e.target.previousElementSibling.value = e.target.value;
},
...attrs
}),
]),
]);
break;
default:
console.warn("Unsupported setting type, defaulting to text");
element = $el("div", [
@ -431,7 +455,15 @@ export class ComfyUI {
defaultValue: true,
});
const promptFilename = this.settings.addSetting({
id: "Comfy.PromptFilename",
name: "Prompt for filename when saving workflow",
type: "boolean",
defaultValue: true,
});
const fileInput = $el("input", {
id: "comfy-file-input",
type: "file",
accept: ".json,image/png",
style: { display: "none" },
@ -448,6 +480,7 @@ export class ComfyUI {
$el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }),
]),
$el("button.comfy-queue-btn", {
id: "queue-button",
textContent: "Queue Prompt",
onclick: () => app.queuePrompt(0, this.batchCount),
}),
@ -496,9 +529,10 @@ export class ComfyUI {
]),
]),
$el("div.comfy-menu-btns", [
$el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }),
$el("button", { id: "queue-front-button", textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }),
$el("button", {
$: (b) => (this.queue.button = b),
id: "comfy-view-queue-button",
textContent: "View Queue",
onclick: () => {
this.history.hide();
@ -507,6 +541,7 @@ export class ComfyUI {
}),
$el("button", {
$: (b) => (this.history.button = b),
id: "comfy-view-history-button",
textContent: "View History",
onclick: () => {
this.queue.hide();
@ -517,14 +552,23 @@ export class ComfyUI {
this.queue.element,
this.history.element,
$el("button", {
id: "comfy-save-button",
textContent: "Save",
onclick: () => {
let filename = "workflow.json";
if (promptFilename.value) {
filename = prompt("Save workflow as:", filename);
if (!filename) return;
if (!filename.toLowerCase().endsWith(".json")) {
filename += ".json";
}
}
const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string
const blob = new Blob([json], { type: "application/json" });
const url = URL.createObjectURL(blob);
const a = $el("a", {
href: url,
download: "workflow.json",
download: filename,
style: { display: "none" },
parent: document.body,
});
@ -535,15 +579,15 @@ export class ComfyUI {
}, 0);
},
}),
$el("button", { textContent: "Load", onclick: () => fileInput.click() }),
$el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
$el("button", { textContent: "Clear", onclick: () => {
$el("button", { id: "comfy-load-button", textContent: "Load", onclick: () => fileInput.click() }),
$el("button", { id: "comfy-refresh-button", textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
$el("button", { id: "comfy-clear-button", textContent: "Clear", onclick: () => {
if (!confirmClear.value || confirm("Clear workflow?")) {
app.clean();
app.graph.clear();
}
}}),
$el("button", { textContent: "Load Default", onclick: () => {
$el("button", { id: "comfy-load-default-button", textContent: "Load Default", onclick: () => {
if (!confirmClear.value || confirm("Load default workflow?")) {
app.loadGraphData()
}

View File

@ -10,37 +10,54 @@ function getNumberDefaults(inputData, defaultStep) {
return { val: defaultVal, config: { min, max, step: 10.0 * step } };
}
export function addRandomizeWidget(node, targetWidget, name, defaultValue = false) {
const randomize = node.addWidget("toggle", name, defaultValue, function (v) {}, {
on: "enabled",
off: "disabled",
serialize: false, // Don't include this in prompt.
});
export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) {
const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, {
values: ["fixed", "increment", "decrement", "randomize"],
serialize: false, // Don't include this in prompt.
});
valueControl.afterQueued = () => {
randomize.afterQueued = () => {
if (randomize.value) {
const min = targetWidget.options?.min;
let max = targetWidget.options?.max;
if (min != null || max != null) {
if (max) {
// limit max to something that javascript can handle
max = Math.min(1125899906842624, max);
}
targetWidget.value = Math.floor(Math.random() * ((max ?? 9999999999) - (min ?? 0) + 1) + (min ?? 0));
} else {
targetWidget.value = Math.floor(Math.random() * 1125899906842624);
}
var v = valueControl.value;
let min = targetWidget.options.min;
let max = targetWidget.options.max;
// limit to something that javascript can handle
max = Math.min(1125899906842624, max);
min = Math.max(-1125899906842624, min);
let range = (max - min) / (targetWidget.options.step / 10);
// adjust the value based on the valueControl behaviour
switch (v) {
case "fixed":
break;
case "increment":
targetWidget.value += targetWidget.options.step / 10;
break;
case "decrement":
targetWidget.value -= targetWidget.options.step / 10;
break;
case "randomize":
targetWidget.value = Math.floor(Math.random() * range) * (targetWidget.options.step / 10) + min;
default:
break;
}
};
return randomize;
}
/*check if values are over or under their respective
* ranges and set them to min or max.*/
if (targetWidget.value < min)
targetWidget.value = min;
if (targetWidget.value > max)
targetWidget.value = max;
}
return valueControl;
};
function seedWidget(node, inputName, inputData) {
const seed = ComfyWidgets.INT(node, inputName, inputData);
const randomize = addRandomizeWidget(node, seed.widget, "Random seed after every gen", true);
const seedControl = addValueControlWidget(node, seed.widget, "randomize");
seed.widget.linkedWidgets = [randomize];
return { widget: seed, randomize };
seed.widget.linkedWidgets = [seedControl];
return seed;
}
const MultilineSymbol = Symbol();
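addValueControlWidget replaces the old boolean randomize toggle with a control_after_generate combo (fixed / increment / decrement / randomize) that is applied after every queued prompt and clamped to the target widget's range. A hedged usage sketch, assuming an existing LGraphNode instance `node`; the INT config values are made up:

```
const seed = ComfyWidgets.INT(node, "seed", ["INT", { default: 0, min: 0, max: 0xffffffff, step: 1 }]);
const control = addValueControlWidget(node, seed.widget, "increment");

control.afterQueued();        // "increment": advances the seed by one widget step
control.value = "randomize";
control.afterQueued();        // picks a random value clamped to the widget's min/max
```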

View File

@ -1,6 +1,13 @@
:root {
--fg-color: #000;
--bg-color: #fff;
--comfy-menu-bg: #353535;
--comfy-input-bg: #222;
--input-text: #ddd;
--descrip-text: #999;
--drag-text: #ccc;
--error-text: #ff4444;
--border-color: #4e4e4e;
}
@media (prefers-color-scheme: dark) {
@ -25,8 +32,8 @@ body {
}
.comfy-multiline-input {
background-color: var(--bg-color);
color: var(--fg-color);
background-color: var(--comfy-input-bg);
color: var(--input-text);
overflow: hidden;
overflow-y: auto;
padding: 2px;
@ -39,8 +46,8 @@ body {
position: fixed; /* Stay in place */
z-index: 100; /* Sit on top */
padding: 30px 30px 10px 30px;
background-color: #353535; /* Modal background */
color: #ff4444;
background-color: var(--comfy-menu-bg); /* Modal background */
color: var(--error-text);
box-shadow: 0px 0px 20px #888888;
border-radius: 10px;
top: 50%;
@ -82,8 +89,8 @@ body {
display: flex;
flex-direction: column;
align-items: center;
color: #999;
background-color: #353535;
color: var(--descrip-text);
background-color: var(--comfy-menu-bg);
font-family: sans-serif;
padding: 10px;
border-radius: 0 8px 8px 8px;
@ -103,7 +110,7 @@ body {
.comfy-menu-btns button {
font-size: 10px;
width: 50%;
color: #999 !important;
color: var(--descrip-text) !important;
}
.comfy-menu > button {
@ -114,10 +121,10 @@ body {
.comfy-menu-btns button,
.comfy-menu .comfy-list button,
.comfy-modal button{
color: #ddd;
background-color: #222;
color: var(--input-text);
background-color: var(--comfy-input-bg);
border-radius: 8px;
border-color: #4e4e4e;
border-color: var(--border-color);
border-style: solid;
margin-top: 2px;
}
@ -136,7 +143,7 @@ body {
font-size: 12px;
font-family: sans-serif;
letter-spacing: 2px;
color: #cccccc;
color: var(--drag-text);
text-shadow: 1px 0 1px black;
position: absolute;
top: 0;
@ -152,10 +159,10 @@ body {
}
.comfy-list {
color: #999;
background-color: #333;
color: var(--descrip-text);
background-color: var(--comfy-menu-bg);
margin-bottom: 10px;
border-color: #4e4e4e;
border-color: var(--border-color);
border-style: solid;
}
@ -163,7 +170,7 @@ body {
overflow-y: scroll;
max-height: 100px;
min-height: 25px;
background-color: #222;
background-color: var(--comfy-input-bg);
padding: 5px;
}
@ -206,16 +213,24 @@ button.comfy-queue-btn {
.comfy-modal.comfy-manage-templates {
text-align: center;
font-family: sans-serif;
color: #999;
color: var(--descrip-text);
z-index: 99;
}
.comfy-modal.comfy-settings input[type="range"] {
vertical-align: middle;
}
.comfy-modal.comfy-settings input[type="range"] + input[type="number"] {
width: 3.5em;
}
.comfy-modal input,
.comfy-modal select {
color: #ddd;
background-color: #222;
color: var(--input-text);
background-color: var(--comfy-input-bg);
border-radius: 8px;
border-color: #4e4e4e;
border-color: var(--border-color);
border-style: solid;
font-size: inherit;
}
@ -240,7 +255,7 @@ button.comfy-queue-btn {
.graphdialog .name {
font-size: 14px;
font-family: sans-serif;
color: #999999;
color: var(--descrip-text);
}
.graphdialog button {
@ -251,10 +266,10 @@ button.comfy-queue-btn {
}
.graphdialog input, .graphdialog textarea, .graphdialog select {
background-color: #222;
background-color: var(--comfy-input-bg);
border: 2px solid;
border-color: #444444;
color: #ddd;
border-color: var(--border-color);
color: var(--input-text);
border-radius: 12px 0 0 12px;
}
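The stylesheet now reads its colors from custom properties declared on :root, so the UI can be re-themed by overriding those variables instead of editing individual rules. A hedged runtime override; the property names come from the hunks above, the color values are arbitrary:

```
for (const [name, value] of Object.entries({
	"--comfy-menu-bg": "#1e1e2e",
	"--comfy-input-bg": "#11111b",
	"--input-text": "#cdd6f4",
	"--descrip-text": "#a6adc8",
	"--border-color": "#585b70",
})) {
	document.documentElement.style.setProperty(name, value);
}
```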

78
web/types/comfy.d.ts vendored Normal file
View File

@ -0,0 +1,78 @@
import { LGraphNode, IWidget } from "./litegraph";
import { ComfyApp } from "/scripts/app";
export interface ComfyExtension {
/**
* The name of the extension
*/
name: string;
/**
* Allows any initialisation, e.g. loading resources. Called after the canvas is created but before nodes are added
* @param app The ComfyUI app instance
*/
init(app: ComfyApp): Promise<void>;
/**
* Allows any additional setup, called after the application is fully set up and running
* @param app The ComfyUI app instance
*/
setup(app: ComfyApp): Promise<void>;
/**
* Called before nodes are registered with the graph
* @param defs The collection of node definitions, add custom ones or edit existing ones
* @param app The ComfyUI app instance
*/
addCustomNodeDefs(defs: Record<string, ComfyObjectInfo>, app: ComfyApp): Promise<void>;
/**
* Allows the extension to add custom widgets
* @param app The ComfyUI app instance
* @returns An array of {[widget name]: widget data}
*/
getCustomWidgets(
app: ComfyApp
): Promise<
Array<
Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
>
>;
/**
* Allows the extension to add additional handling to the node before it is registered with LGraph
* @param nodeType The node class (not an instance)
* @param nodeData The original node object info config object
* @param app The ComfyUI app instance
*/
beforeRegisterNodeDef(nodeType: typeof LGraphNode, nodeData: ComfyObjectInfo, app: ComfyApp): Promise<void>;
/**
* Allows the extension to register additional nodes with LGraph after standard nodes are added
* @param app The ComfyUI app instance
*/
registerCustomNodes(app: ComfyApp): Promise<void>;
/**
* Allows the extension to modify a node that has been reloaded onto the graph.
* If you break something in the backend and want to patch workflows in the frontend,
* this is the place to do it.
* @param node The node that has been loaded
* @param app The ComfyUI app instance
*/
loadedGraphNode(node: LGraphNode, app: ComfyApp);
/**
* Allows the extension to run code after the constructor of the node
* @param node The node that has been created
* @param app The ComfyUI app instance
*/
nodeCreated(node: LGraphNode, app: ComfyApp);
}
export type ComfyObjectInfo = {
name: string;
display_name?: string;
description?: string;
category: string;
input?: {
required?: Record<string, ComfyObjectInfoConfig>;
optional?: Record<string, ComfyObjectInfoConfig>;
};
output?: string[];
output_name: string[];
};
export type ComfyObjectInfoConfig = [string | any[]] | [string | any[], any];
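ComfyObjectInfo mirrors the node definition objects an extension can add or edit in its addCustomNodeDefs(defs, app) callback. A hedged example of a definition matching that shape; every name and type string here is illustrative rather than an existing node:

```
defs["Example.TextNode"] = {
	name: "Example.TextNode",
	display_name: "Example Text Node",
	description: "Illustration of the ComfyObjectInfo shape",
	category: "examples",
	input: {
		required: {
			// each entry is a ComfyObjectInfoConfig: [type, optional config object]
			text: ["STRING", { multiline: true }],
			strength: ["FLOAT", { default: 1.0, min: 0.0, max: 10.0, step: 0.01 }],
		},
	},
	output: ["STRING"],
	output_name: ["text"],
};
```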

1506
web/types/litegraph.d.ts vendored Normal file

File diff suppressed because it is too large