Merge remote-tracking branch 'upstream/master' into seedControls

merging
FizzleDorf 2023-03-31 13:40:38 -04:00
commit e3657c7460
18 changed files with 727 additions and 91 deletions

.ci/windows_base_files/README_VERY_IMPORTANT.txt

@@ -25,3 +25,7 @@ To update the ComfyUI code: update\update_comfyui.bat
To update ComfyUI together with its python dependencies (ONLY run this if you have issues with the python dependencies):
update\update_comfyui_and_python_dependencies.bat
TO SHARE MODELS BETWEEN COMFYUI AND ANOTHER UI:
In the ComfyUI directory you will find a file: extra_model_paths.yaml.example
Rename this file to: extra_model_paths.yaml and edit it with your favorite text editor.
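For instance, the bundled example file contains an `a111` section: pointing its `base_path` at an existing AUTOMATIC1111 webui install lets both UIs share the same model folders.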

README.md

@@ -30,6 +30,11 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
## Shortcuts
- **Ctrl + A** select all nodes
- **Ctrl + M** mute/unmute selected nodes
- **Delete** or **Backspace** delete selected nodes
# Installing
## Windows
@@ -40,6 +45,10 @@ There is a portable standalone build for Windows that should work for running on
Just download, extract and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints
#### How do I share models between another UI and ComfyUI?
See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor.
## Colab Notebook
To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
@@ -64,7 +73,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins
Nvidia users should install torch and xformers using this command:
```pip install torch==1.13.1 torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 xformers```
```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers```
#### Troubleshooting
@@ -97,7 +106,6 @@ With cmd.exe: ```"path_to_other_sd_gui\venv\Scripts\activate.bat"```
And then you can use that terminal to run ComfyUI without installing any dependencies. Note that the venv folder might be called something else depending on the SD UI.
# Running
```python main.py```

comfy/ldm/models/diffusion/ddim.py

@@ -78,7 +78,7 @@ class DDIMSampler(object):
dynamic_threshold=None,
ucg_schedule=None,
denoise_function=None,
cond_concat=None,
extra_args=None,
to_zero=True,
end_step=None,
**kwargs
@@ -101,7 +101,7 @@ class DDIMSampler(object):
dynamic_threshold=dynamic_threshold,
ucg_schedule=ucg_schedule,
denoise_function=denoise_function,
cond_concat=cond_concat,
extra_args=extra_args,
to_zero=to_zero,
end_step=end_step
)
@@ -174,7 +174,7 @@ class DDIMSampler(object):
dynamic_threshold=dynamic_threshold,
ucg_schedule=ucg_schedule,
denoise_function=None,
cond_concat=None
extra_args=None
)
return samples, intermediates
@@ -185,7 +185,7 @@ class DDIMSampler(object):
mask=None, x0=None, img_callback=None, log_every_t=100,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
ucg_schedule=None, denoise_function=None, cond_concat=None, to_zero=True, end_step=None):
ucg_schedule=None, denoise_function=None, extra_args=None, to_zero=True, end_step=None):
device = self.model.betas.device
b = shape[0]
if x_T is None:
@@ -225,7 +225,7 @@ class DDIMSampler(object):
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold, denoise_function=denoise_function, cond_concat=cond_concat)
dynamic_threshold=dynamic_threshold, denoise_function=denoise_function, extra_args=extra_args)
img, pred_x0 = outs
if callback: callback(i)
if img_callback: img_callback(pred_x0, i)
@@ -249,11 +249,11 @@ class DDIMSampler(object):
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,
dynamic_threshold=None, denoise_function=None, cond_concat=None):
dynamic_threshold=None, denoise_function=None, extra_args=None):
b, *_, device = *x.shape, x.device
if denoise_function is not None:
model_output = denoise_function(self.model.apply_model, x, t, unconditional_conditioning, c, unconditional_guidance_scale, cond_concat)
model_output = denoise_function(self.model.apply_model, x, t, **extra_args)
elif unconditional_conditioning is None or unconditional_guidance_scale == 1.:
model_output = self.model.apply_model(x, t, c)
else:

comfy/ldm/models/diffusion/ddpm.py

@@ -1317,12 +1317,12 @@ class DiffusionWrapper(torch.nn.Module):
self.conditioning_key = conditioning_key
assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm', 'crossattn-adm']
def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None, control=None):
def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None, control=None, transformer_options={}):
if self.conditioning_key is None:
out = self.diffusion_model(x, t, control=control)
out = self.diffusion_model(x, t, control=control, transformer_options=transformer_options)
elif self.conditioning_key == 'concat':
xc = torch.cat([x] + c_concat, dim=1)
out = self.diffusion_model(xc, t, control=control)
out = self.diffusion_model(xc, t, control=control, transformer_options=transformer_options)
elif self.conditioning_key == 'crossattn':
if not self.sequential_cross_attn:
cc = torch.cat(c_crossattn, 1)
@@ -1332,25 +1332,25 @@ class DiffusionWrapper(torch.nn.Module):
# TorchScript changes names of the arguments
# with argument cc defined as context=cc scripted model will produce
# an error: RuntimeError: forward() is missing value for argument 'argument_3'.
out = self.scripted_diffusion_model(x, t, cc, control=control)
out = self.scripted_diffusion_model(x, t, cc, control=control, transformer_options=transformer_options)
else:
out = self.diffusion_model(x, t, context=cc, control=control)
out = self.diffusion_model(x, t, context=cc, control=control, transformer_options=transformer_options)
elif self.conditioning_key == 'hybrid':
xc = torch.cat([x] + c_concat, dim=1)
cc = torch.cat(c_crossattn, 1)
out = self.diffusion_model(xc, t, context=cc, control=control)
out = self.diffusion_model(xc, t, context=cc, control=control, transformer_options=transformer_options)
elif self.conditioning_key == 'hybrid-adm':
assert c_adm is not None
xc = torch.cat([x] + c_concat, dim=1)
cc = torch.cat(c_crossattn, 1)
out = self.diffusion_model(xc, t, context=cc, y=c_adm, control=control)
out = self.diffusion_model(xc, t, context=cc, y=c_adm, control=control, transformer_options=transformer_options)
elif self.conditioning_key == 'crossattn-adm':
assert c_adm is not None
cc = torch.cat(c_crossattn, 1)
out = self.diffusion_model(x, t, context=cc, y=c_adm, control=control)
out = self.diffusion_model(x, t, context=cc, y=c_adm, control=control, transformer_options=transformer_options)
elif self.conditioning_key == 'adm':
cc = c_crossattn[0]
out = self.diffusion_model(x, t, y=cc, control=control)
out = self.diffusion_model(x, t, y=cc, control=control, transformer_options=transformer_options)
else:
raise NotImplementedError()
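Every change in this file is the same plumbing move: a `transformer_options` dict is added to each call signature and handed one level further down, so metadata set near the top of the stack (the UNet stores `original_shape` into it two files below) becomes readable inside the innermost transformer blocks. A minimal sketch of the pattern, with hypothetical module names rather than ComfyUI's classes:

```python
import torch
import torch.nn as nn

class InnerBlock(nn.Module):
    def forward(self, x, transformer_options={}):
        # The innermost layer reads metadata injected at the top of the call chain.
        original_shape = transformer_options.get("original_shape")
        return x  # a real block would use this for shape-aware attention patches

class Outer(nn.Module):
    def __init__(self):
        super().__init__()
        self.block = InnerBlock()

    def forward(self, x, transformer_options={}):
        transformer_options["original_shape"] = list(x.shape)
        return self.block(x, transformer_options=transformer_options)

print(Outer()(torch.ones(1, 4, 8, 8)).shape)  # torch.Size([1, 4, 8, 8])
```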

comfy/ldm/modules/attention.py

@@ -504,10 +504,10 @@ class BasicTransformerBlock(nn.Module):
self.norm3 = nn.LayerNorm(dim)
self.checkpoint = checkpoint
def forward(self, x, context=None):
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
def forward(self, x, context=None, transformer_options={}):
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
def _forward(self, x, context=None):
def _forward(self, x, context=None, transformer_options={}):
x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
x = self.attn2(self.norm2(x), context=context) + x
x = self.ff(self.norm3(x)) + x
@@ -557,7 +557,7 @@ class SpatialTransformer(nn.Module):
self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))
self.use_linear = use_linear
def forward(self, x, context=None):
def forward(self, x, context=None, transformer_options={}):
# note: if no context is given, cross-attention defaults to self-attention
if not isinstance(context, list):
context = [context]
@@ -570,7 +570,7 @@ class SpatialTransformer(nn.Module):
if self.use_linear:
x = self.proj_in(x)
for i, block in enumerate(self.transformer_blocks):
x = block(x, context=context[i])
x = block(x, context=context[i], transformer_options=transformer_options)
if self.use_linear:
x = self.proj_out(x)
x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()

comfy/ldm/modules/diffusionmodules/openaimodel.py

@@ -76,12 +76,12 @@ class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
support it as an extra input.
"""
def forward(self, x, emb, context=None):
def forward(self, x, emb, context=None, transformer_options={}):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
x = layer(x, context, transformer_options)
else:
x = layer(x)
return x
@@ -753,7 +753,7 @@ class UNetModel(nn.Module):
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps=None, context=None, y=None, control=None, **kwargs):
def forward(self, x, timesteps=None, context=None, y=None, control=None, transformer_options={}, **kwargs):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
@@ -762,6 +762,7 @@
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
transformer_options["original_shape"] = list(x.shape)
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
@@ -775,13 +776,13 @@
h = x.type(self.dtype)
for id, module in enumerate(self.input_blocks):
h = module(h, emb, context)
h = module(h, emb, context, transformer_options)
if control is not None and 'input' in control and len(control['input']) > 0:
ctrl = control['input'].pop()
if ctrl is not None:
h += ctrl
hs.append(h)
h = self.middle_block(h, emb, context)
h = self.middle_block(h, emb, context, transformer_options)
if control is not None and 'middle' in control and len(control['middle']) > 0:
h += control['middle'].pop()
@@ -793,7 +794,7 @@
hsp += ctrl
h = th.cat([h, hsp], dim=1)
del hsp
h = module(h, emb, context)
h = module(h, emb, context, transformer_options)
h = h.type(x.dtype)
if self.predict_codebook_ids:
return self.id_predictor(h)

comfy/samplers.py

@@ -26,7 +26,7 @@ class CFGDenoiser(torch.nn.Module):
#The main sampling function shared by all the samplers
#Returns predicted noise
def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None):
def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}):
def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in):
area = (x_in.shape[2], x_in.shape[3], 0, 0)
strength = 1.0
@@ -169,6 +169,9 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
if control is not None:
c['control'] = control.get_control(input_x, timestep_, c['c_crossattn'], len(cond_or_uncond))
if 'transformer_options' in model_options:
c['transformer_options'] = model_options['transformer_options']
output = model_function(input_x, timestep_, cond=c).chunk(batch_chunks)
del input_x
@@ -221,7 +224,7 @@ class KSamplerX0Inpaint(torch.nn.Module):
def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, cond_concat=None):
if denoise_mask is not None:
latent_mask = 1. - denoise_mask
x = x * denoise_mask + (self.latent_image + self.noise * sigma) * latent_mask
x = x * denoise_mask + (self.latent_image + self.noise * sigma.reshape([sigma.shape[0]] + [1] * (len(self.noise.shape) - 1))) * latent_mask
out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat)
if denoise_mask is not None:
out *= denoise_mask
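The `sigma.reshape(...)` added above is a broadcasting fix: `sigma` carries one value per batch element (shape `[B]`), so scaling `[B, C, H, W]` noise requires viewing it as `[B, 1, 1, 1]` first. A quick shape check of the same expression:

```python
import torch

noise = torch.randn(2, 4, 8, 8)   # [B, C, H, W]
sigma = torch.tensor([1.0, 2.0])  # one sigma per batch element, shape [B]
scaled = noise * sigma.reshape([sigma.shape[0]] + [1] * (len(noise.shape) - 1))
print(scaled.shape)  # torch.Size([2, 4, 8, 8]); each sample scaled by its own sigma
```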
@@ -242,7 +245,10 @@ def ddim_scheduler(model, steps):
sigs = []
ddim_timesteps = make_ddim_timesteps(ddim_discr_method="uniform", num_ddim_timesteps=steps, num_ddpm_timesteps=model.inner_model.inner_model.num_timesteps, verbose=False)
for x in range(len(ddim_timesteps) - 1, -1, -1):
sigs.append(model.t_to_sigma(torch.tensor(ddim_timesteps[x])))
ts = ddim_timesteps[x]
if ts > 999:
ts = 999
sigs.append(model.t_to_sigma(torch.tensor(ts)))
sigs += [0.0]
return torch.FloatTensor(sigs)
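The new clamp protects `t_to_sigma`, which indexes a 1000-entry sigma table (valid t: 0 to 999). LDM's "uniform" DDIM spacing adds 1 to every timestep, so at high step counts the final entry lands on 1000. A small reproduction, assuming upstream's `make_ddim_timesteps` formula:

```python
import numpy as np

steps = num_ddpm_timesteps = 1000
c = num_ddpm_timesteps // steps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + 1
print(ddim_timesteps.max())  # 1000 -> one past the sigma table, hence the clamp to 999
```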
@@ -373,7 +379,7 @@ class KSampler:
def set_steps(self, steps, denoise=None):
self.steps = steps
if denoise is None:
if denoise is None or denoise > 0.9999:
self.sigmas = self._calculate_sigmas(steps)
else:
new_steps = int(steps/denoise)
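The `denoise > 0.9999` guard treats near-full denoise as an ordinary full run. For partial denoise the schedule is stretched rather than coarsened: with `steps=20, denoise=0.5` the sampler computes sigmas for `int(20 / 0.5) = 40` steps and then samples only the low-noise tail of that longer schedule, so a given denoise strength behaves consistently across step counts.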
@@ -464,7 +470,7 @@ class KSampler:
x_T=z_enc,
x0=latent_image,
denoise_function=sampling_function,
cond_concat=cond_concat,
extra_args=extra_args,
mask=noise_mask,
to_zero=sigmas[-1]==0,
end_step=sigmas.shape[0] - 1)

comfy/sd.py

@@ -439,9 +439,14 @@ class VAE:
model_management.unload_model()
self.first_stage_model = self.first_stage_model.to(self.device)
try:
samples = samples_in.to(self.device)
pixel_samples = self.first_stage_model.decode(1. / self.scale_factor * samples)
pixel_samples = torch.clamp((pixel_samples + 1.0) / 2.0, min=0.0, max=1.0)
free_memory = model_management.get_free_memory(self.device)
batch_number = int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64))
batch_number = max(1, batch_number)
pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu")
for x in range(0, samples_in.shape[0], batch_number):
samples = samples_in[x:x+batch_number].to(self.device)
pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(1. / self.scale_factor * samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu()
except model_management.OOM_EXCEPTION as e:
print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.")
pixel_samples = self.decode_tiled_(samples_in)
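The decode path now sizes its batches from free VRAM (the `2562 * H * W * 64` factor reads as an empirical per-latent memory cost) and streams results into a preallocated CPU tensor, keeping tiled decoding as the OOM fallback. The chunking pattern in isolation, with a stand-in `decode_fn`:

```python
import torch

def batched_decode(decode_fn, samples_in, free_memory):
    # Estimate how many latents fit in ~70% of free VRAM (constant per the diff).
    batch_number = max(1, int((free_memory * 0.7) / (2562 * samples_in.shape[2] * samples_in.shape[3] * 64)))
    out = torch.empty((samples_in.shape[0], 3, samples_in.shape[2] * 8, samples_in.shape[3] * 8), device="cpu")
    for x in range(0, samples_in.shape[0], batch_number):
        out[x:x + batch_number] = decode_fn(samples_in[x:x + batch_number]).cpu()
    return out

# Stand-in decoder: 8x upsample, first three latent channels as RGB.
fake_decode = lambda s: torch.nn.functional.interpolate(s[:, :3], scale_factor=8.0)
print(batched_decode(fake_decode, torch.randn(5, 4, 16, 16), free_memory=2**30).shape)
```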

execution.py

@@ -10,7 +10,7 @@ import gc
import torch
import nodes
def get_input_data(inputs, class_def, outputs={}, prompt={}, extra_data={}):
def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_data={}):
valid_inputs = class_def.INPUT_TYPES()
input_data_all = {}
for x in inputs:
@@ -34,6 +36,8 @@ def get_input_data(inputs, class_def, outputs={}, prompt={}, extra_data={}):
if h[x] == "EXTRA_PNGINFO":
if "extra_pnginfo" in extra_data:
input_data_all[x] = extra_data['extra_pnginfo']
if h[x] == "UNIQUE_ID":
input_data_all[x] = unique_id
return input_data_all
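With `unique_id` now threaded into `get_input_data`, a node class can request its own graph id through the hidden-input convention shown above. A hedged sketch of a custom node using it (hypothetical class, not part of this commit):

```python
class EchoNodeId:
    @classmethod
    def INPUT_TYPES(cls):
        # "hidden" inputs are filled in by the executor, not by the user or graph.
        return {"required": {}, "hidden": {"unique_id": "UNIQUE_ID"}}

    RETURN_TYPES = ("STRING",)
    FUNCTION = "run"

    def run(self, unique_id):
        return (f"running as node {unique_id}",)
```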
def recursive_execute(server, prompt, outputs, current_item, extra_data={}):
@@ -55,7 +57,7 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data={}):
if input_unique_id not in outputs:
executed += recursive_execute(server, prompt, outputs, input_unique_id, extra_data)
input_data_all = get_input_data(inputs, class_def, outputs, prompt, extra_data)
input_data_all = get_input_data(inputs, class_def, unique_id, outputs, prompt, extra_data)
if server.client_id is not None:
server.last_node_id = unique_id
server.send_sync("executing", { "node": unique_id }, server.client_id)
@@ -63,8 +65,11 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data={}):
nodes.before_node_execution()
outputs[unique_id] = getattr(obj, obj.FUNCTION)(**input_data_all)
if "ui" in outputs[unique_id] and server.client_id is not None:
server.send_sync("executed", { "node": unique_id, "output": outputs[unique_id]["ui"] }, server.client_id)
if "ui" in outputs[unique_id]:
if server.client_id is not None:
server.send_sync("executed", { "node": unique_id, "output": outputs[unique_id]["ui"] }, server.client_id)
if "result" in outputs[unique_id]:
outputs[unique_id] = outputs[unique_id]["result"]
return executed + [unique_id]
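Previously a node returning a `ui` dict had no machine-readable output; the new `result` key lets one node both report UI data and feed downstream nodes. A hypothetical node exploiting it:

```python
class PreviewAndForward:
    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "run"

    def run(self, image):
        # "ui" is pushed to the client as before; the executor then unwraps
        # "result" so downstream nodes still receive a normal output tuple.
        return {"ui": {"images": []}, "result": (image,)}
```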
def recursive_will_execute(prompt, outputs, current_item):
@@ -96,7 +101,7 @@ def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item
if unique_id in old_prompt and 'is_changed' in old_prompt[unique_id]:
is_changed_old = old_prompt[unique_id]['is_changed']
if 'is_changed' not in prompt[unique_id]:
input_data_all = get_input_data(inputs, class_def, outputs)
input_data_all = get_input_data(inputs, class_def, unique_id, outputs)
if input_data_all is not None:
is_changed = class_def.IS_CHANGED(**input_data_all)
prompt[unique_id]['is_changed'] = is_changed

main.py

@@ -12,12 +12,13 @@ if os.name == "nt":
if __name__ == "__main__":
if '--help' in sys.argv:
print("Valid Command line Arguments:")
print("\t--listen\t\t\tListen on 0.0.0.0 so the UI can be accessed from other computers.")
print("\t--listen [ip]\t\t\tListen on ip or 0.0.0.0 if none given so the UI can be accessed from other computers.")
print("\t--port 8188\t\t\tSet the listen port.")
print("\t--dont-upcast-attention\t\tDisable upcasting of attention \n\t\t\t\t\tcan boost speed but increase the chances of black images.\n")
print("\t--use-split-cross-attention\tUse the split cross attention optimization instead of the sub-quadratic one.\n\t\t\t\t\tIgnored when xformers is used.")
print("\t--use-pytorch-cross-attention\tUse the new pytorch 2.0 cross attention function.")
print("\t--disable-xformers\t\tdisables xformers")
print("\t--cuda-device 1\t\tSet the id of the cuda device this instance will use.")
print()
print("\t--highvram\t\t\tBy default models will be unloaded to CPU memory after being used.\n\t\t\t\t\tThis option keeps them in GPU memory.\n")
print("\t--normalvram\t\t\tUsed to force normal vram use if lowvram gets automatically enabled.")
@@ -31,6 +32,14 @@ if __name__ == "__main__":
print("disabling upcasting of attention")
os.environ['ATTN_PRECISION'] = "fp16"
try:
index = sys.argv.index('--cuda-device')
device = sys.argv[index + 1]
os.environ['CUDA_VISIBLE_DEVICES'] = device
print("Set cuda device to:", device)
except:
pass
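# Note: CUDA_VISIBLE_DEVICES only takes effect if set before CUDA is initialized, which is presumably why this runs before the torch-importing modules below.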
import execution
import server
import folder_paths
@@ -92,11 +101,19 @@ if __name__ == "__main__":
hijack_progress(server)
threading.Thread(target=prompt_worker, daemon=True, args=(q,server,)).start()
if '--listen' in sys.argv:
    address = '0.0.0.0'
else:
    address = '127.0.0.1'
try:
    address = '0.0.0.0'
    p_index = sys.argv.index('--listen')
    try:
        ip = sys.argv[p_index + 1]
        if ip[:2] != '--':
            address = ip
    except:
        pass
except:
    address = '127.0.0.1'
dont_print = False
if '--dont-print-server' in sys.argv:
dont_print = True

notebooks/comfyui_colab.ipynb

@@ -47,7 +47,7 @@
" !git pull\n",
"\n",
"!echo -= Install dependencies =-\n",
"!pip -q install xformers -r requirements.txt"
"!pip install xformers==0.0.16 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu117"
]
},
{

requirements.txt

@@ -3,7 +3,7 @@ torchdiffeq
torchsde
einops
open-clip-torch
transformers
transformers>=4.25.1
safetensors
pytorch_lightning
aiohttp

web/extensions/core/colorPalette.js

@@ -0,0 +1,351 @@
import { app } from "/scripts/app.js";
import { $el } from "/scripts/ui.js";
import { api } from "/scripts/api.js";
// Manage color palettes
const colorPalettes = {
"palette_1": {
"id": "palette_1",
"name": "Palette 1",
"colors": {
"node_slot": {
"CLIP": "#FFD500", // bright yellow
"CLIP_VISION": "#A8DADC", // light blue-gray
"CLIP_VISION_OUTPUT": "#ad7452", // rusty brown-orange
"CONDITIONING": "#FFA931", // vibrant orange-yellow
"CONTROL_NET": "#6EE7B7", // soft mint green
"IMAGE": "#64B5F6", // bright sky blue
"LATENT": "#FF9CF9", // light pink-purple
"MASK": "#81C784", // muted green
"MODEL": "#B39DDB", // light lavender-purple
"STYLE_MODEL": "#C2FFAE", // light green-yellow
"VAE": "#FF6E6E", // bright red
}
}
},
"palette_2": {
"id": "palette_2",
"name": "Palette 2",
"colors": {
"node_slot": {
"CLIP": "#556B2F", // Dark Olive Green
"CLIP_VISION": "#4B0082", // Indigo
"CLIP_VISION_OUTPUT": "#006400", // Green
"CONDITIONING": "#FF1493", // Deep Pink
"CONTROL_NET": "#8B4513", // Saddle Brown
"IMAGE": "#8B0000", // Dark Red
"LATENT": "#00008B", // Dark Blue
"MASK": "#2F4F4F", // Dark Slate Grey
"MODEL": "#FF8C00", // Dark Orange
"STYLE_MODEL": "#004A4A", // Sherpa Blue
"UPSCALE_MODEL": "#4A004A", // Tyrian Purple
"VAE": "#4F394F", // Loulou
}
}
}
};
const id = "Comfy.ColorPalette";
const idCustomColorPalettes = "Comfy.CustomColorPalettes";
const defaultColorPaletteId = "palette_1";
const els = {}
// const ctxMenu = LiteGraph.ContextMenu;
app.registerExtension({
name: id,
init() {
const sortObjectKeys = (unordered) => {
return Object.keys(unordered).sort().reduce((obj, key) => {
obj[key] = unordered[key];
return obj;
}, {});
};
const getSlotTypes = async () => {
var types = [];
const defs = await api.getNodeDefs();
for (const nodeId in defs) {
const nodeData = defs[nodeId];
var inputs = nodeData["input"]["required"];
if (nodeData["input"]["optional"] != undefined){
inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"])
}
for (const inputName in inputs) {
const inputData = inputs[inputName];
const type = inputData[0];
if (!Array.isArray(type)) {
types.push(type);
}
}
for (const o in nodeData["output"]) {
const output = nodeData["output"][o];
types.push(output);
}
}
return types;
};
const completeColorPalette = async (colorPalette) => {
var types = await getSlotTypes();
for (const type of types) {
if (!colorPalette.colors.node_slot[type]) {
colorPalette.colors.node_slot[type] = "";
}
}
colorPalette.colors.node_slot = sortObjectKeys(colorPalette.colors.node_slot);
return colorPalette;
};
const getColorPaletteTemplate = async () => {
let colorPalette = {
"id": "my_color_palette_unique_id",
"name": "My Color Palette",
"colors": {
"node_slot": {
}
}
};
return completeColorPalette(colorPalette);
};
const getCustomColorPalettes = () => {
return app.ui.settings.getSettingValue(idCustomColorPalettes, {});
};
const setCustomColorPalettes = (customColorPalettes) => {
return app.ui.settings.setSettingValue(idCustomColorPalettes, customColorPalettes);
};
const addCustomColorPalette = async (colorPalette) => {
if (typeof(colorPalette) !== "object") {
app.ui.dialog.show("Invalid color palette");
return;
}
if (!colorPalette.id) {
app.ui.dialog.show("Color palette missing id");
return;
}
if (!colorPalette.name) {
app.ui.dialog.show("Color palette missing name");
return;
}
if (!colorPalette.colors) {
app.ui.dialog.show("Color palette missing colors");
return;
}
if (colorPalette.colors.node_slot && typeof(colorPalette.colors.node_slot) !== "object") {
app.ui.dialog.show("Invalid color palette colors.node_slot");
return;
}
let customColorPalettes = getCustomColorPalettes();
customColorPalettes[colorPalette.id] = colorPalette;
setCustomColorPalettes(customColorPalettes);
for (const option of els.select.childNodes) {
if (option.value === "custom_" + colorPalette.id) {
els.select.removeChild(option);
}
}
els.select.append($el("option", { textContent: colorPalette.name + " (custom)", value: "custom_" + colorPalette.id, selected: true }));
setColorPalette("custom_" + colorPalette.id);
await loadColorPalette(colorPalette);
};
const deleteCustomColorPalette = async (colorPaletteId) => {
let customColorPalettes = getCustomColorPalettes();
delete customColorPalettes[colorPaletteId];
setCustomColorPalettes(customColorPalettes);
for (const option of els.select.childNodes) {
if (option.value === defaultColorPaletteId) {
option.selected = true;
}
if (option.value === "custom_" + colorPaletteId) {
els.select.removeChild(option);
}
}
setColorPalette(defaultColorPaletteId);
await loadColorPalette(getColorPalette());
};
const loadColorPalette = async (colorPalette) => {
colorPalette = await completeColorPalette(colorPalette);
if (colorPalette.colors) {
if (colorPalette.colors.node_slot) {
Object.assign(app.canvas.default_connection_color_byType, colorPalette.colors.node_slot);
app.canvas.draw(true, true);
}
}
};
const getColorPalette = (colorPaletteId) => {
if (!colorPaletteId) {
colorPaletteId = app.ui.settings.getSettingValue(id, defaultColorPaletteId);
}
if (colorPaletteId.startsWith("custom_")) {
colorPaletteId = colorPaletteId.substr(7);
let customColorPalettes = getCustomColorPalettes();
if (customColorPalettes[colorPaletteId]) {
return customColorPalettes[colorPaletteId];
}
}
return colorPalettes[colorPaletteId];
};
const setColorPalette = (colorPaletteId) => {
app.ui.settings.setSettingValue(id, colorPaletteId);
};
const fileInput = $el("input", {
type: "file",
accept: ".json",
style: { display: "none" },
parent: document.body,
onchange: () => {
let file = fileInput.files[0];
if (file.type === "application/json" || file.name.endsWith(".json")) {
const reader = new FileReader();
reader.onload = async () => {
await addCustomColorPalette(JSON.parse(reader.result));
};
reader.readAsText(file);
}
},
});
app.ui.settings.addSetting({
id,
name: "Color Palette",
type: (name, setter, value) => {
let options = [];
for (const c in colorPalettes) {
const colorPalette = colorPalettes[c];
options.push($el("option", { textContent: colorPalette.name, value: colorPalette.id, selected: colorPalette.id === value }));
}
let customColorPalettes = getCustomColorPalettes();
for (const c in customColorPalettes) {
const colorPalette = customColorPalettes[c];
options.push($el("option", { textContent: colorPalette.name + " (custom)", value: "custom_" + colorPalette.id, selected: "custom_" + colorPalette.id === value }));
}
return $el("div", [
$el("label", { textContent: name || id }, [
els.select = $el("select", {
onchange: (e) => {
setter(e.target.value);
}
}, options)
]),
$el("input", {
type: "button",
value: "Export",
onclick: async () => {
const colorPaletteId = app.ui.settings.getSettingValue(id, defaultColorPaletteId);
const colorPalette = await completeColorPalette(getColorPalette(colorPaletteId));
const json = JSON.stringify(colorPalette, null, 2); // convert the data to a JSON string
const blob = new Blob([json], { type: "application/json" });
const url = URL.createObjectURL(blob);
const a = $el("a", {
href: url,
download: colorPaletteId + ".json",
style: { display: "none" },
parent: document.body,
});
a.click();
setTimeout(function () {
a.remove();
window.URL.revokeObjectURL(url);
}, 0);
},
}),
$el("input", {
type: "button",
value: "Import",
onclick: () => {
fileInput.click();
}
}),
$el("input", {
type: "button",
value: "Template",
onclick: async () => {
const colorPalette = await getColorPaletteTemplate();
const json = JSON.stringify(colorPalette, null, 2); // convert the data to a JSON string
const blob = new Blob([json], { type: "application/json" });
const url = URL.createObjectURL(blob);
const a = $el("a", {
href: url,
download: "color_palette.json",
style: { display: "none" },
parent: document.body,
});
a.click();
setTimeout(function () {
a.remove();
window.URL.revokeObjectURL(url);
}, 0);
}
}),
$el("input", {
type: "button",
value: "Delete",
onclick: async () => {
let colorPaletteId = app.ui.settings.getSettingValue(id, defaultColorPaletteId);
if (colorPalettes[colorPaletteId]) {
app.ui.dialog.show("You cannot delete built-in color palette");
return;
}
if (colorPaletteId.startsWith("custom_")) {
colorPaletteId = colorPaletteId.substr(7);
}
await deleteCustomColorPalette(colorPaletteId);
}
}),
]);
},
defaultValue: defaultColorPaletteId,
async onChange(value) {
if (!value) {
return;
}
if (colorPalettes[value]) {
await loadColorPalette(colorPalettes[value]);
} else if (value.startsWith("custom_")) {
value = value.substr(7);
let customColorPalettes = getCustomColorPalettes();
if (customColorPalettes[value]) {
await loadColorPalette(customColorPalettes[value]);
}
}
},
});
},
});
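In short, a custom palette is a JSON file shaped like the built-ins above (`id`, `name`, and a `colors.node_slot` map from slot type to hex color): Template exports a stub with every known slot type left blank, Import validates the file and stores it under the `Comfy.CustomColorPalettes` setting, and Export round-trips the currently selected palette.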

web/extensions/core/saveImageExtraOutput.js

@@ -0,0 +1,100 @@
import { app } from "/scripts/app.js";
// Use widget values and dates in output filenames
app.registerExtension({
name: "Comfy.SaveImageExtraOutput",
async beforeRegisterNodeDef(nodeType, nodeData, app) {
if (nodeData.name === "SaveImage") {
const onNodeCreated = nodeType.prototype.onNodeCreated;
// Simple date formatter
const parts = {
d: (d) => d.getDate(),
M: (d) => d.getMonth() + 1,
h: (d) => d.getHours(),
m: (d) => d.getMinutes(),
s: (d) => d.getSeconds(),
};
const format =
Object.keys(parts)
.map((k) => k + k + "?")
.join("|") + "|yyy?y?";
function formatDate(text, date) {
return text.replace(new RegExp(format, "g"), function (text) {
if (text === "yy") return (date.getFullYear() + "").substring(2);
if (text === "yyyy") return date.getFullYear();
if (text[0] in parts) {
const p = parts[text[0]](date);
return (p + "").padStart(text.length, "0");
}
return text;
});
}
// When the SaveImage node is created we want to override the serialization of the output name widget to run our S&R
nodeType.prototype.onNodeCreated = function () {
const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
const widget = this.widgets.find((w) => w.name === "filename_prefix");
widget.serializeValue = () => {
return widget.value.replace(/%([^%]+)%/g, function (match, text) {
const split = text.split(".");
if (split.length !== 2) {
// Special handling for dates
if (split[0].startsWith("date:")) {
return formatDate(split[0].substring(5), new Date());
}
if (text !== "width" && text !== "height") {
// Don't warn on standard replacements
console.warn("Invalid replacement pattern", text);
}
return match;
}
// Find node with matching S&R property name
let nodes = app.graph._nodes.filter((n) => n.properties?.["Node name for S&R"] === split[0]);
// If we can't, see if there is a node with that title
if (!nodes.length) {
nodes = app.graph._nodes.filter((n) => n.title === split[0]);
}
if (!nodes.length) {
console.warn("Unable to find node", split[0]);
return match;
}
if (nodes.length > 1) {
console.warn("Multiple nodes matched", split[0], "using first match");
}
const node = nodes[0];
const widget = node.widgets?.find((w) => w.name === split[1]);
if (!widget) {
console.warn("Unable to find widget", split[1], "on node", split[0], node);
return match;
}
return ((widget.value ?? "") + "").replaceAll(/\/|\\/g, "_");
});
};
return r;
};
} else {
// When any other node is created add a property to alias the node
const onNodeCreated = nodeType.prototype.onNodeCreated;
nodeType.prototype.onNodeCreated = function () {
const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined;
if (!this.properties || !("Node name for S&R" in this.properties)) {
this.addProperty("Node name for S&R", this.title, "string");
}
return r;
};
}
},
});
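Taken together, this enables `filename_prefix` values such as `%date:yyyy-MM-dd%` (handled by the date formatter above) or `%KSampler.seed%`, which resolves against the node whose "Node name for S&R" property (or title) is `KSampler`; any `/` or `\` in the substituted value is sanitized to `_`.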

web/extensions/core/widgetInputs.js

@@ -105,7 +105,7 @@ app.registerExtension({
callback: () => convertToWidget(this, w),
});
} else {
const config = nodeData?.input?.required[w.name] || [w.type, w.options || {}];
const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}];
if (isConvertableWidget(w, config)) {
toInput.push({
content: `Convert ${w.name} to input`,

web/scripts/app.js

@@ -417,6 +417,59 @@ class ComfyApp {
};
}
/**
* Handle keypress
*
* Ctrl + M mute/unmute selected nodes
*/
#addProcessKeyHandler() {
const self = this;
const origProcessKey = LGraphCanvas.prototype.processKey;
LGraphCanvas.prototype.processKey = function(e) {
const res = origProcessKey.apply(this, arguments);
if (res === false) {
return res;
}
if (!this.graph) {
return;
}
var block_default = false;
if (e.target.localName == "input") {
return;
}
if (e.type == "keydown") {
// Ctrl + M mute/unmute
if (e.keyCode == 77 && e.ctrlKey) {
if (this.selected_nodes) {
for (var i in this.selected_nodes) {
if (this.selected_nodes[i].mode === 2) { // never
this.selected_nodes[i].mode = 0; // always
} else {
this.selected_nodes[i].mode = 2; // never
}
}
}
block_default = true;
}
}
this.graph.change();
if (block_default) {
e.preventDefault();
e.stopImmediatePropagation();
return false;
}
return res;
};
}
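The mode values come from LiteGraph: `0` is ALWAYS (normal execution) and `2` is NEVER, so Ctrl+M simply toggles between the two. Muted nodes are drawn faded by the `drawNode` override added below and skipped entirely when the graph is serialized to a prompt (the `node.mode === 2` check further down).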
/**
* Draws group header bar
*/
@@ -465,10 +518,11 @@
* Draws node highlights (executing, drag drop) and progress bar
*/
#addDrawNodeHandler() {
const orig = LGraphCanvas.prototype.drawNodeShape;
const origDrawNodeShape = LGraphCanvas.prototype.drawNodeShape;
const self = this;
LGraphCanvas.prototype.drawNodeShape = function (node, ctx, size, fgcolor, bgcolor, selected, mouse_over) {
const res = orig.apply(this, arguments);
const res = origDrawNodeShape.apply(this, arguments);
let color = null;
if (node.id === +self.runningNodeId) {
@@ -517,6 +571,21 @@
return res;
};
const origDrawNode = LGraphCanvas.prototype.drawNode;
LGraphCanvas.prototype.drawNode = function (node, ctx) {
var editor_alpha = this.editor_alpha;
if (node.mode === 2) { // never
this.editor_alpha = 0.4;
}
const res = origDrawNode.apply(this, arguments);
this.editor_alpha = editor_alpha;
return res;
};
}
/**
@@ -548,6 +617,10 @@ class ComfyApp {
api.addEventListener("executed", ({ detail }) => {
this.nodeOutputs[detail.node] = detail.output;
const node = this.graph.getNodeById(detail.node);
if (node?.onExecuted) {
node.onExecuted(detail.output);
}
});
api.init();
@@ -576,27 +649,6 @@ class ComfyApp {
}
}
/**
* Setup slot colors for types
*/
setupSlotColors() {
let colors = {
"CLIP": "#FFD500", // bright yellow
"CLIP_VISION": "#A8DADC", // light blue-gray
"CLIP_VISION_OUTPUT": "#ad7452", // rusty brown-orange
"CONDITIONING": "#FFA931", // vibrant orange-yellow
"CONTROL_NET": "#6EE7B7", // soft mint green
"IMAGE": "#64B5F6", // bright sky blue
"LATENT": "#FF9CF9", // light pink-purple
"MASK": "#81C784", // muted green
"MODEL": "#B39DDB", // light lavender-purple
"STYLE_MODEL": "#C2FFAE", // light green-yellow
"VAE": "#FF6E6E", // bright red
};
Object.assign(this.canvas.default_connection_color_byType, colors);
}
/**
* Set up the app on the page
*/
@@ -609,13 +661,12 @@ class ComfyApp {
document.body.prepend(canvasEl);
this.#addProcessMouseHandler();
this.#addProcessKeyHandler();
this.graph = new LGraph();
const canvas = (this.canvas = new LGraphCanvas(canvasEl, this.graph));
this.ctx = canvasEl.getContext("2d");
this.setupSlotColors();
this.graph.start();
function resizeCanvas() {
@@ -692,18 +743,22 @@ class ComfyApp {
const inputData = inputs[inputName];
const type = inputData[0];
if (Array.isArray(type)) {
// Enums
Object.assign(config, widgets.COMBO(this, inputName, inputData, app) || {});
} else if (`${type}:${inputName}` in widgets) {
// Support custom widgets by Type:Name
Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {});
} else if (type in widgets) {
// Standard type widgets
Object.assign(config, widgets[type](this, inputName, inputData, app) || {});
} else {
// Node connection inputs
if(inputData[1]?.forceInput) {
this.addInput(inputName, type);
} else {
if (Array.isArray(type)) {
// Enums
Object.assign(config, widgets.COMBO(this, inputName, inputData, app) || {});
} else if (`${type}:${inputName}` in widgets) {
// Support custom widgets by Type:Name
Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {});
} else if (type in widgets) {
// Standard type widgets
Object.assign(config, widgets[type](this, inputName, inputData, app) || {});
} else {
// Node connection inputs
this.addInput(inputName, type);
}
}
}
@@ -744,6 +799,8 @@ class ComfyApp {
* @param {*} graphData A serialized graph object
*/
loadGraphData(graphData) {
this.clean();
if (!graphData) {
graphData = defaultGraph;
}
@@ -802,6 +859,11 @@ class ComfyApp {
continue;
}
if (node.mode === 2) {
// Don't serialize muted nodes
continue;
}
const inputs = {};
const widgets = node.widgets;
@@ -841,6 +903,18 @@ class ComfyApp {
};
}
// Remove inputs connected to removed nodes
for (const o in output) {
for (const i in output[o].inputs) {
if (Array.isArray(output[o].inputs[i])
&& output[o].inputs[i].length === 2
&& !output[output[o].inputs[i][0]]) {
delete output[o].inputs[i];
}
}
}
return { workflow, output };
}
@@ -930,6 +1004,13 @@ class ComfyApp {
}
}
}
/**
* Clean current state
*/
clean() {
this.nodeOutputs = {};
}
}
export const app = new ComfyApp();

web/scripts/ui.js

@@ -1,6 +1,6 @@
import { api } from "./api.js";
function $el(tag, propsOrChildren, children) {
export function $el(tag, propsOrChildren, children) {
const split = tag.split(".");
const element = document.createElement(split.shift());
element.classList.add(...split);
@@ -114,6 +125,17 @@ class ComfySettingsDialog extends ComfyDialog {
this.settings = [];
}
getSettingValue(id, defaultValue) {
const settingId = "Comfy.Settings." + id;
const v = localStorage[settingId];
return v == null ? defaultValue : JSON.parse(v);
}
setSettingValue(id, value) {
const settingId = "Comfy.Settings." + id;
localStorage[settingId] = JSON.stringify(value);
}
addSetting({ id, name, type, defaultValue, onChange }) {
if (!id) {
throw new Error("Settings must have an ID");
@@ -142,7 +153,7 @@ class ComfySettingsDialog extends ComfyDialog {
};
if (typeof type === "function") {
return type(name, setter);
return type(name, setter, value);
}
switch (type) {
@@ -214,10 +225,10 @@ class ComfyList {
$el("button", {
textContent: "Load",
onclick: () => {
app.loadGraphData(item.prompt[3].extra_pnginfo.workflow);
if (item.outputs) {
app.nodeOutputs = item.outputs;
}
app.loadGraphData(item.prompt[3].extra_pnginfo.workflow);
},
}),
$el("button", {
@@ -377,7 +388,10 @@ export class ComfyUI {
}),
$el("button", { textContent: "Load", onclick: () => fileInput.click() }),
$el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
$el("button", { textContent: "Clear", onclick: () => app.graph.clear() }),
$el("button", { textContent: "Clear", onclick: () => {
app.clean();
app.graph.clear();
}}),
$el("button", { textContent: "Load Default", onclick: () => app.loadGraphData() }),
]);

web/style.css

@@ -64,6 +64,12 @@ body {
margin-bottom: 20px; /* Add some margin between the text and the close button*/
}
.comfy-modal select,
.comfy-modal input[type=button],
.comfy-modal input[type=checkbox] {
margin: 3px 3px 3px 4px;
}
.comfy-modal button {
cursor: pointer;
color: #aaaaaa;
@@ -95,6 +101,12 @@ body {
display: flex;
flex-direction: column;
align-items: center;
color: #999;
background-color: #353535;
font-family: sans-serif;
padding: 10px;
border-radius: 0 8px 8px 8px;
box-shadow: 3px 3px 8px rgba(0, 0, 0, 0.4);
}
.comfy-menu button {
@@ -109,6 +121,22 @@ body {
.comfy-menu-btns button {
font-size: 10px;
width: 50%;
color: #999 !important;
}
.comfy-menu > button {
width: 100%;
}
.comfy-menu > button,
.comfy-menu-btns button,
.comfy-menu .comfy-list button {
color: #ddd;
background-color: #222;
border-radius: 8px;
border-color: #4e4e4e;
border-style: solid;
margin-top: 2px;
}
.comfy-menu span.drag-handle {
@@ -141,14 +169,18 @@ body {
}
.comfy-list {
background-color: rgb(225, 225, 225);
color: #999;
background-color: #333;
margin-bottom: 10px;
border-color: #4e4e4e;
border-style: solid;
}
.comfy-list-items {
overflow-y: scroll;
max-height: 100px;
background-color: #d0d0d0;
min-height: 25px;
background-color: #222;
padding: 5px;
}
@@ -175,6 +207,7 @@ body {
}
button.comfy-settings-btn {
background-color: rgba(0, 0, 0, 0);
font-size: 12px;
padding: 0;
position: absolute;
@@ -182,6 +215,10 @@
border: none;
}
button.comfy-queue-btn {
margin: 6px 0 !important;
}
.comfy-modal.comfy-settings {
background-color: var(--bg-color);
color: var(--fg-color);
@@ -190,6 +227,13 @@ button.comfy-settings-btn {
@media only screen and (max-height: 850px) {
.comfy-menu {
margin-top: -70px;
top: 0 !important;
bottom: 0 !important;
left: auto !important;
right: 0 !important;
border-radius: 0px;
}
.comfy-menu span.drag-handle {
visibility:hidden
}
}