jwd-dev 2023-04-16 01:32:46 -04:00
commit db1f7d3d96
15 changed files with 1840 additions and 83 deletions


@@ -32,14 +32,28 @@ This ui will let you design and execute advanced stable diffusion pipelines usin
Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
## Shortcuts
- **Ctrl + A** select all nodes
- **Ctrl + M** mute/unmute selected nodes
- **Delete** or **Backspace** delete selected nodes
- **Space** Holding space key while moving the cursor moves the canvas around. It works when holding the mouse button down so it is easier to connect different nodes when the canvas gets too large.
- **Ctrl/Shift + Click** Add clicked node to selection.
- **Ctrl + C/Ctrl + V** - Copy and paste selected nodes, without maintaining the connection to the outputs of unselected nodes.
- **Ctrl + C/Ctrl + Shift + V** - Copy and paste selected nodes, and maintaining the connection from the outputs of unselected nodes to the inputs of the newly pasted nodes.
- Holding **Shift** and drag selected nodes - Move multiple selected nodes at the same time.
| Keybind | Explanation |
| - | - |
| Ctrl + Enter | Queue up current graph for generation |
| Ctrl + Shift + Enter | Queue up current graph as first for generation |
| Ctrl + S | Save workflow |
| Ctrl + O | Load workflow |
| Ctrl + A | Select all nodes |
| Ctrl + M | Mute/unmute selected nodes |
| Delete/Backspace | Delete selected nodes |
| Ctrl + Delete/Backspace | Delete the current graph |
| Space | Move the canvas around when held and moving the cursor |
| Ctrl/Shift + Click | Add clicked node to selection |
| Ctrl + C/Ctrl + V | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes) |
| Ctrl + C/Ctrl + Shift + V | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) |
| Shift + Drag | Move multiple selected nodes at the same time |
| Ctrl + D | Load default graph |
| Q | Toggle visibility of the queue |
| H | Toggle visibility of history |
| R | Refresh graph |
For macOS users, Ctrl can be replaced with Cmd.
# Installing


@@ -9,7 +9,7 @@ from typing import Optional, Any
from ldm.modules.diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention
import model_management
from comfy import model_management
from . import tomesd
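
This and the next several hunks make the same one-line change: a bare top-level `import model_management` becomes the package-qualified `from comfy import model_management`, so the module resolves when `comfy` is imported as a package instead of relying on the `comfy/` directory being on `sys.path`. A minimal sketch of the calling side, assuming a working ComfyUI checkout (`get_torch_device` is referenced later in this commit):

```python
# Package-qualified import: resolves against the comfy package rather than
# requiring the comfy/ directory itself to be on sys.path.
from comfy import model_management

device = model_management.get_torch_device()  # e.g. a CUDA device when a GPU is available
```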


@@ -7,7 +7,7 @@ from einops import rearrange
from typing import Optional, Any
from ldm.modules.attention import MemoryEfficientCrossAttention
import model_management
from comfy import model_management
if model_management.xformers_enabled_vae():
import xformers


@@ -24,7 +24,7 @@ except ImportError:
from torch import Tensor
from typing import List
import model_management
from comfy import model_management
def dynamic_slice(
x: Tensor,


@@ -3,7 +3,7 @@ from .k_diffusion import external as k_diffusion_external
from .extra_samplers import uni_pc
import torch
import contextlib
import model_management
from comfy import model_management
from .ldm.models.diffusion.ddim import DDIMSampler
from .ldm.modules.diffusionmodules.util import make_ddim_timesteps


@@ -4,7 +4,7 @@ import copy
import sd1_clip
import sd2_clip
import model_management
from comfy import model_management
from .ldm.util import instantiate_from_config
from .ldm.models.autoencoder import AutoencoderKL
import yaml
@@ -372,10 +372,12 @@ class CLIP:
def clip_layer(self, layer_idx):
self.layer_idx = layer_idx
def encode(self, text):
def tokenize(self, text, return_word_ids=False):
return self.tokenizer.tokenize_with_weights(text, return_word_ids)
def encode_from_tokens(self, tokens):
if self.layer_idx is not None:
self.cond_stage_model.clip_layer(self.layer_idx)
tokens = self.tokenizer.tokenize_with_weights(text)
try:
self.patcher.patch_model()
cond = self.cond_stage_model.encode_token_weights(tokens)
@@ -385,6 +387,10 @@ class CLIP:
raise e
return cond
def encode(self, text):
tokens = self.tokenize(text)
return self.encode_from_tokens(tokens)
class VAE:
def __init__(self, ckpt_path=None, scale_factor=0.18215, device=None, config=None):
if config is None:
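
The `CLIP` hunk above splits the old one-shot `encode` into `tokenize` plus `encode_from_tokens`, keeping `encode` as a wrapper composing the two; this appears intended to let callers inspect or adjust tokens (for example, per-word weights) before encoding. A minimal usage sketch, assuming `clip` is an already-loaded comfy `CLIP` instance:

```python
# Two-step form: tokenize first, then encode the (token, weight) batches.
tokens = clip.tokenize("a watercolor painting of a fox")
cond = clip.encode_from_tokens(tokens)

# One-step convenience wrapper, equivalent to the two calls above.
cond = clip.encode("a watercolor painting of a fox")
```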


@@ -260,60 +260,97 @@ class SD1Tokenizer:
self.inv_vocab = {v: k for k, v in vocab.items()}
self.embedding_directory = embedding_directory
self.max_word_length = 8
self.embedding_identifier = "embedding:"
def _try_get_embedding(self, embedding_name:str):
'''
Takes a potential embedding name and tries to retrieve it.
Returns a tuple of (embedding, leftover string); the embedding can be None.
'''
embed = load_embed(embedding_name, self.embedding_directory)
if embed is None:
stripped = embedding_name.strip(',')
if len(stripped) < len(embedding_name):
embed = load_embed(stripped, self.embedding_directory)
return (embed, embedding_name[len(stripped):])
return (embed, "")
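
The contract of `_try_get_embedding` is easiest to see with concrete calls. A hypothetical sketch, assuming `tok` is an `SD1Tokenizer` and the embeddings directory contains `foo.pt`:

```python
embed, leftover = tok._try_get_embedding("foo,")
# "foo," fails to load, so the trailing comma is stripped and retried:
# embed    -> the tensor loaded from foo.pt
# leftover -> "," (handed back to the caller to parse as ordinary prompt text)

embed, leftover = tok._try_get_embedding("does_not_exist")
# embed -> None, leftover -> "" (the caller warns and ignores the word)
```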
def tokenize_with_weights(self, text:str, return_word_ids=False):
'''
Takes a prompt and converts it to a list of (token, weight, word id) elements.
Tokens can be both integer tokens and pre-computed CLIP tensors.
Word id values are unique per word and embedding, where id 0 is reserved for non-word tokens.
The returned list has dimensions NxM, where M is the input size of CLIP.
'''
if self.pad_with_end:
pad_token = self.end_token
else:
pad_token = 0
def tokenize_with_weights(self, text):
text = escape_important(text)
parsed_weights = token_weights(text, 1.0)
#tokenize words
tokens = []
for t in parsed_weights:
to_tokenize = unescape_important(t[0]).replace("\n", " ").split(' ')
while len(to_tokenize) > 0:
word = to_tokenize.pop(0)
temp_tokens = []
embedding_identifier = "embedding:"
if word.startswith(embedding_identifier) and self.embedding_directory is not None:
embedding_name = word[len(embedding_identifier):].strip('\n')
embed = load_embed(embedding_name, self.embedding_directory)
for weighted_segment, weight in parsed_weights:
to_tokenize = unescape_important(weighted_segment).replace("\n", " ").split(' ')
to_tokenize = [x for x in to_tokenize if x != ""]
for word in to_tokenize:
#if we find an embedding, deal with the embedding
if word.startswith(self.embedding_identifier) and self.embedding_directory is not None:
embedding_name = word[len(self.embedding_identifier):].strip('\n')
embed, leftover = self._try_get_embedding(embedding_name)
if embed is None:
stripped = embedding_name.strip(',')
if len(stripped) < len(embedding_name):
embed = load_embed(stripped, self.embedding_directory)
if embed is not None:
to_tokenize.insert(0, embedding_name[len(stripped):])
if embed is not None:
if len(embed.shape) == 1:
temp_tokens += [(embed, t[1])]
else:
for x in range(embed.shape[0]):
temp_tokens += [(embed[x], t[1])]
print(f"warning, embedding:{embedding_name} does not exist, ignoring")
else:
print("warning, embedding:{} does not exist, ignoring".format(embedding_name))
elif len(word) > 0:
tt = self.tokenizer(word)["input_ids"][1:-1]
for x in tt:
temp_tokens += [(x, t[1])]
tokens_left = self.max_tokens_per_section - (len(tokens) % self.max_tokens_per_section)
if len(embed.shape) == 1:
tokens.append([(embed, weight)])
else:
tokens.append([(embed[x], weight) for x in range(embed.shape[0])])
#if we accidentally have leftover text, continue parsing using leftover, else move on to next word
if leftover != "":
word = leftover
else:
continue
#parse word
tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]])
#try not to split words in different sections
if tokens_left < len(temp_tokens) and len(temp_tokens) < (self.max_word_length):
for x in range(tokens_left):
tokens += [(self.end_token, 1.0)]
tokens += temp_tokens
#reshape token array to CLIP input size
batched_tokens = []
batch = [(self.start_token, 1.0, 0)]
batched_tokens.append(batch)
for i, t_group in enumerate(tokens):
#determine if we're going to try and keep the tokens in a single batch
is_large = len(t_group) >= self.max_word_length
out_tokens = []
for x in range(0, len(tokens), self.max_tokens_per_section):
o_token = [(self.start_token, 1.0)] + tokens[x:min(self.max_tokens_per_section + x, len(tokens))]
o_token += [(self.end_token, 1.0)]
if self.pad_with_end:
o_token +=[(self.end_token, 1.0)] * (self.max_length - len(o_token))
else:
o_token +=[(0, 1.0)] * (self.max_length - len(o_token))
while len(t_group) > 0:
if len(t_group) + len(batch) > self.max_length - 1:
remaining_length = self.max_length - len(batch) - 1
#break word in two and add end token
if is_large:
batch.extend([(t,w,i+1) for t,w in t_group[:remaining_length]])
batch.append((self.end_token, 1.0, 0))
t_group = t_group[remaining_length:]
#add end token and pad
else:
batch.append((self.end_token, 1.0, 0))
batch.extend([(pad_token, 1.0, 0)] * (remaining_length))
#start new batch
batch = [(self.start_token, 1.0, 0)]
batched_tokens.append(batch)
else:
batch.extend([(t,w,i+1) for t,w in t_group])
t_group = []
out_tokens += [o_token]
#fill last batch
batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1))
if not return_word_ids:
batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens]
return batched_tokens
return out_tokens
def untokenize(self, token_weight_pair):
return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))
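
The NxM format described in the `tokenize_with_weights` docstring is easiest to see on a small prompt. An illustrative sketch (not part of the diff), assuming `tok` is an `SD1Tokenizer` with SD1 CLIP's usual input size of 77:

```python
batches = tok.tokenize_with_weights("a (red:1.2) ball", return_word_ids=True)
for batch in batches:
    assert len(batch) == 77           # every batch is padded to the CLIP input size
    for token, weight, word_id in batch:
        # token:   an integer token id, or a precomputed embedding tensor
        # weight:  emphasis parsed from the (text:1.2) syntax, 1.0 by default
        # word_id: 0 for start/end/pad tokens, otherwise unique per word
        pass

# With return_word_ids=False (the default), the word id column is dropped and
# each element is just (token, weight).
```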


@@ -1,4 +1,4 @@
import sd1_clip
from comfy import sd1_clip
import torch
import os


@@ -1,6 +1,6 @@
import os
from comfy_extras.chainner_models import model_loading
import model_management
from comfy import model_management
import torch
import comfy.utils
import folder_paths


@@ -21,16 +21,16 @@ import comfy.utils
import comfy.clip_vision
import model_management
import comfy.model_management
import importlib
import folder_paths
def before_node_execution():
model_management.throw_exception_if_processing_interrupted()
comfy.model_management.throw_exception_if_processing_interrupted()
def interrupt_processing(value=True):
model_management.interrupt_current_processing(value)
comfy.model_management.interrupt_current_processing(value)
MAX_RESOLUTION=8192
@@ -241,7 +241,7 @@ class DiffusersLoader:
model_path = os.path.join(search_path, model_path)
break
return comfy.diffusers_convert.load_diffusers(model_path, fp16=model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
return comfy.diffusers_convert.load_diffusers(model_path, fp16=comfy.model_management.should_use_fp16(), output_vae=output_vae, output_clip=output_clip, embedding_directory=folder_paths.get_folder_paths("embeddings"))
class unCLIPCheckpointLoader:
@@ -680,7 +680,7 @@ class SetLatentNoiseMask:
def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False):
latent_image = latent["samples"]
noise_mask = None
device = model_management.get_torch_device()
device = comfy.model_management.get_torch_device()
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
@@ -696,7 +696,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
noise_mask = noise_mask.to(device)
real_model = None
model_management.load_model_gpu(model)
comfy.model_management.load_model_gpu(model)
real_model = model.model
noise = noise.to(device)
@@ -726,7 +726,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
control_net_models = []
for x in control_nets:
control_net_models += x.get_control_models()
model_management.load_controlnet_gpu(control_net_models)
comfy.model_management.load_controlnet_gpu(control_net_models)
if sampler_name in comfy.samplers.KSampler.SAMPLERS:
sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
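
`before_node_execution` raises when the user has requested an interrupt, which is how queued prompts abort between nodes. A minimal sketch (assumed, not part of this commit) of a long-running custom node cooperating with the same mechanism:

```python
import comfy.model_management

def run_many_steps(steps):
    for i in range(steps):
        # Raises inside the loop if the user pressed interrupt, aborting the prompt.
        comfy.model_management.throw_exception_if_processing_interrupted()
        do_one_step(i)  # hypothetical per-step work
```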


@@ -0,0 +1,76 @@
import { app } from "/scripts/app.js";
const id = "Comfy.Keybinds";
app.registerExtension({
name: id,
init() {
const keybindListener = function(event) {
const target = event.composedPath()[0];
if (target.tagName === "INPUT" || target.tagName === "TEXTAREA") {
return;
}
const modifierPressed = event.ctrlKey || event.metaKey;
// Queue prompt using ctrl or command + enter
if (modifierPressed && (event.key === "Enter" || event.keyCode === 13 || event.keyCode === 10)) {
app.queuePrompt(event.shiftKey ? -1 : 0);
return;
}
const modifierKeyIdMap = {
"s": "#comfy-save-button",
83: "#comfy-save-button",
"o": "#comfy-file-input",
79: "#comfy-file-input",
"Backspace": "#comfy-clear-button",
8: "#comfy-clear-button",
"Delete": "#comfy-clear-button",
46: "#comfy-clear-button",
"d": "#comfy-load-default-button",
68: "#comfy-load-default-button",
};
const modifierKeybindId = modifierKeyIdMap[event.key] || modifierKeyIdMap[event.keyCode];
if (modifierPressed && modifierKeybindId) {
event.preventDefault();
const elem = document.querySelector(modifierKeybindId);
elem.click();
return;
}
// Finished Handling all modifier keybinds, now handle the rest
if (event.ctrlKey || event.altKey || event.metaKey) {
return;
}
// Close out of modals using escape
if (event.key === "Escape" || event.keyCode === 27) {
const modals = document.querySelectorAll(".comfy-modal");
const modal = Array.from(modals).find(modal => window.getComputedStyle(modal).getPropertyValue("display") !== "none");
if (modal) {
modal.style.display = "none";
}
}
const keyIdMap = {
"q": "#comfy-view-queue-button",
81: "#comfy-view-queue-button",
"h": "#comfy-view-history-button",
72: "#comfy-view-history-button",
"r": "#comfy-refresh-button",
82: "#comfy-refresh-button",
};
const buttonId = keyIdMap[event.key] || keyIdMap[event.keyCode];
if (buttonId) {
const button = document.querySelector(buttonId);
button.click();
}
}
window.addEventListener("keydown", keybindListener, true);
}
});


@@ -4,27 +4,48 @@ import { api } from "./api.js";
import { defaultGraph } from "./defaultGraph.js";
import { getPngMetadata, importA1111 } from "./pnginfo.js";
class ComfyApp {
/**
* List of {number, batchCount} entries to queue
/**
* @typedef {import("types/comfy").ComfyExtension} ComfyExtension
*/
export class ComfyApp {
/**
* List of entries to queue
* @type {{number: number, batchCount: number}[]}
*/
#queueItems = [];
/**
* If the queue is currently being processed
* @type {boolean}
*/
#processingQueue = false;
constructor() {
this.ui = new ComfyUI(this);
/**
* List of extensions that are registered with the app
* @type {ComfyExtension[]}
*/
this.extensions = [];
/**
* Stores the execution output data for each node
* @type {Record<string, any>}
*/
this.nodeOutputs = {};
/**
* If the shift key on the keyboard is pressed
* @type {boolean}
*/
this.shiftDown = false;
}
/**
* Invoke an extension callback
* @param {string} method The extension callback to execute
* @param {...any} args Any arguments to pass to the callback
* @param {keyof ComfyExtension} method The extension callback to execute
* @param {any[]} args Any arguments to pass to the callback
* @returns
*/
#invokeExtensions(method, ...args) {
@@ -691,11 +712,6 @@ class ComfyApp {
#addKeyboardHandler() {
window.addEventListener("keydown", (e) => {
this.shiftDown = e.shiftKey;
// Queue prompt using ctrl or command + enter
if ((e.ctrlKey || e.metaKey) && (e.key === "Enter" || e.keyCode === 13 || e.keyCode === 10)) {
this.queuePrompt(e.shiftKey ? -1 : 0);
}
});
window.addEventListener("keyup", (e) => {
this.shiftDown = e.shiftKey;
@@ -1120,6 +1136,10 @@ class ComfyApp {
}
}
/**
* Registers a Comfy web extension with the app
* @param {ComfyExtension} extension
*/
registerExtension(extension) {
if (!extension.name) {
throw new Error("Extensions must have a 'name' property.");


@@ -431,7 +431,15 @@ export class ComfyUI {
defaultValue: true,
});
const promptFilename = this.settings.addSetting({
id: "Comfy.PromptFilename",
name: "Prompt for filename when saving workflow",
type: "boolean",
defaultValue: true,
});
const fileInput = $el("input", {
id: "comfy-file-input",
type: "file",
accept: ".json,image/png",
style: { display: "none" },
@@ -448,6 +456,7 @@
$el("button.comfy-settings-btn", { textContent: "⚙️", onclick: () => this.settings.show() }),
]),
$el("button.comfy-queue-btn", {
id: "queue-button",
textContent: "Queue Prompt",
onclick: () => app.queuePrompt(0, this.batchCount),
}),
@@ -496,9 +505,10 @@
]),
]),
$el("div.comfy-menu-btns", [
$el("button", { textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }),
$el("button", { id: "queue-front-button", textContent: "Queue Front", onclick: () => app.queuePrompt(-1, this.batchCount) }),
$el("button", {
$: (b) => (this.queue.button = b),
id: "comfy-view-queue-button",
textContent: "View Queue",
onclick: () => {
this.history.hide();
@@ -507,6 +517,7 @@
}),
$el("button", {
$: (b) => (this.history.button = b),
id: "comfy-view-history-button",
textContent: "View History",
onclick: () => {
this.queue.hide();
@@ -517,14 +528,23 @@
this.queue.element,
this.history.element,
$el("button", {
id: "comfy-save-button",
textContent: "Save",
onclick: () => {
let filename = "workflow.json";
if (promptFilename.value) {
filename = prompt("Save workflow as:", filename);
if (!filename) return;
if (!filename.toLowerCase().endsWith(".json")) {
filename += ".json";
}
}
const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string
const blob = new Blob([json], { type: "application/json" });
const url = URL.createObjectURL(blob);
const a = $el("a", {
href: url,
download: "workflow.json",
download: filename,
style: { display: "none" },
parent: document.body,
});
@@ -535,15 +555,15 @@
}, 0);
},
}),
$el("button", { textContent: "Load", onclick: () => fileInput.click() }),
$el("button", { textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
$el("button", { textContent: "Clear", onclick: () => {
$el("button", { id: "comfy-load-button", textContent: "Load", onclick: () => fileInput.click() }),
$el("button", { id: "comfy-refresh-button", textContent: "Refresh", onclick: () => app.refreshComboInNodes() }),
$el("button", { id: "comfy-clear-button", textContent: "Clear", onclick: () => {
if (!confirmClear.value || confirm("Clear workflow?")) {
app.clean();
app.graph.clear();
}
}}),
$el("button", { textContent: "Load Default", onclick: () => {
$el("button", { id: "comfy-load-default-button", textContent: "Load Default", onclick: () => {
if (!confirmClear.value || confirm("Load default workflow?")) {
app.loadGraphData()
}

web/types/comfy.d.ts

@@ -0,0 +1,78 @@
import { LGraphNode, IWidget } from "./litegraph";
import { ComfyApp } from "/scripts/app";
export interface ComfyExtension {
/**
* The name of the extension
*/
name: string;
/**
* Allows any initialisation, e.g. loading resources. Called after the canvas is created but before nodes are added
* @param app The ComfyUI app instance
*/
init(app: ComfyApp): Promise<void>;
/**
* Allows any additional setup, called after the application is fully set up and running
* @param app The ComfyUI app instance
*/
setup(app: ComfyApp): Promise<void>;
/**
* Called before nodes are registered with the graph
* @param defs The collection of node definitions, add custom ones or edit existing ones
* @param app The ComfyUI app instance
*/
addCustomNodeDefs(defs: Record<string, ComfyObjectInfo>, app: ComfyApp): Promise<void>;
/**
* Allows the extension to add custom widgets
* @param app The ComfyUI app instance
* @returns An array of {[widget name]: widget data}
*/
getCustomWidgets(
app: ComfyApp
): Promise<
Array<
Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
>
>;
/**
* Allows the extension to add additional handling to the node before it is registered with LGraph
* @param nodeType The node class (not an instance)
* @param nodeData The original node object info config object
* @param app The ComfyUI app instance
*/
beforeRegisterNodeDef(nodeType: typeof LGraphNode, nodeData: ComfyObjectInfo, app: ComfyApp): Promise<void>;
/**
* Allows the extension to register additional nodes with LGraph after standard nodes are added
* @param app The ComfyUI app instance
*/
registerCustomNodes(app: ComfyApp): Promise<void>;
/**
* Allows the extension to modify a node that has been reloaded onto the graph.
* If you break something in the backend and want to patch workflows in the frontend,
* this is the place to do it.
* @param node The node that has been loaded
* @param app The ComfyUI app instance
*/
loadedGraphNode(node: LGraphNode, app: ComfyApp);
/**
* Allows the extension to run code after the constructor of the node
* @param node The node that has been created
* @param app The ComfyUI app instance
*/
nodeCreated(node: LGraphNode, app: ComfyApp);
}
export type ComfyObjectInfo = {
name: string;
display_name?: string;
description?: string;
category: string;
input?: {
required?: Record<string, ComfyObjectInfoConfig>;
optional?: Record<string, ComfyObjectInfoConfig>;
};
output?: string[];
output_name: string[];
};
export type ComfyObjectInfoConfig = [string | any[]] | [string | any[], any];
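
These types describe the node definitions the Python backend serves to the frontend: a `ComfyObjectInfoConfig` is the `[type]` or `[type, options]` tuple that a node's `INPUT_TYPES` produces. A hedged sketch of a hypothetical Python node whose definition would serialize into these shapes:

```python
class ExampleBrightness:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "image": ("IMAGE",),  # serializes as the one-element [type] form
                "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0}),  # [type, options]
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "apply"
    CATEGORY = "example"

    def apply(self, image, strength):
        return (image * strength,)
```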

web/types/litegraph.d.ts

File diff suppressed because it is too large.