Merge branch 'master' of github.com:comfyanonymous/ComfyUI

doctorpangloss 2024-05-07 13:57:53 -07:00
commit 3a64e04a93
8 changed files with 175 additions and 100 deletions

View File

@@ -448,29 +448,32 @@ The default installation includes a fast latent preview method that's low-resolu
## Keyboard Shortcuts
| Keybind | Explanation |
|------------------------------------|--------------------------------------------------------------------------------------------------------------------|
| Ctrl + Enter | Queue up current graph for generation |
| Ctrl + Shift + Enter | Queue up current graph as first for generation |
| Ctrl + Z/Ctrl + Y | Undo/Redo |
| Ctrl + S | Save workflow |
| Ctrl + O | Load workflow |
| Ctrl + A | Select all nodes |
| Alt + C | Collapse/uncollapse selected nodes |
| Ctrl + M | Mute/unmute selected nodes |
| Ctrl + B | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through) |
| Delete/Backspace | Delete selected nodes |
| Ctrl + Delete/Backspace | Delete the current graph |
| Space | Move the canvas around when held and moving the cursor |
| Ctrl/Shift + Click | Add clicked node to selection |
| Ctrl + C/Ctrl + V | Copy and paste selected nodes (without maintaining connections to outputs of unselected nodes) |
| Ctrl + C/Ctrl + Shift + V | Copy and paste selected nodes (maintaining connections from outputs of unselected nodes to inputs of pasted nodes) |
| Shift + Drag | Move multiple selected nodes at the same time |
| Ctrl + D | Load default graph |
| Alt + `+`                          | Zoom the canvas in                                                                                                   |
| Alt + `-`                          | Zoom the canvas out                                                                                                  |
| Ctrl + Shift + LMB + Vertical drag | Zoom the canvas in/out                                                                                               |
| Q | Toggle visibility of the queue |
| H | Toggle visibility of history |
| R | Refresh graph |
| Double-Click LMB | Open node quick search palette |
For macOS users, Cmd can be used in place of Ctrl.

View File

@@ -1,3 +1,4 @@
from PIL import Image, ImageFile, UnidentifiedImageError

def conditioning_set_values(conditioning, values={}):
    c = []
@@ -8,3 +9,17 @@ def conditioning_set_values(conditioning, values={}):
        c.append(n)
    return c

def open_image(path):
    prev_value = None
    try:
        img = Image.open(path)
    except (UnidentifiedImageError, ValueError): # PIL issues #4472 and #2445
        prev_value = ImageFile.LOAD_TRUNCATED_IMAGES
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        img = Image.open(path)
    finally:
        if prev_value is not None:
            ImageFile.LOAD_TRUNCATED_IMAGES = prev_value

    return img
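A minimal usage sketch for the new helper (the flat `import node_helpers` path and the image filename are assumptions for illustration):

```python
import node_helpers  # import path assumed; adjust to your checkout layout

# open_image() first tries a normal PIL open; if PIL rejects the file at
# open time (UnidentifiedImageError / ValueError, see PIL issues #4472 and
# #2445), it retries once with ImageFile.LOAD_TRUNCATED_IMAGES enabled and
# restores the previous global value afterwards.
img = node_helpers.open_image("example.png")  # placeholder filename
print(img.size, img.mode)                     # header metadata is available
```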

View File

@@ -7,7 +7,7 @@ import math
import random
import logging
from PIL import Image, ImageOps, ImageSequence
from PIL import Image, ImageOps, ImageSequence, ImageFile
from PIL.PngImagePlugin import PngInfo
from natsort import natsorted
from pkg_resources import resource_filename
@@ -1460,9 +1460,18 @@ class LoadImage:
        _, ext = os.path.splitext(image)
        if ext == ".exr":
            return load_exr(image_path, srgb=False)
        with open_image(image_path) as img:
        with node_helpers.open_image(image_path) as img:
            for i in ImageSequence.Iterator(img):
                prev_value = None
                try:
                    i = ImageOps.exif_transpose(i)
                except OSError:
                    prev_value = ImageFile.LOAD_TRUNCATED_IMAGES
                    ImageFile.LOAD_TRUNCATED_IMAGES = True
                    i = ImageOps.exif_transpose(i)
                finally:
                    if prev_value is not None:
                        ImageFile.LOAD_TRUNCATED_IMAGES = prev_value
                if i.mode == 'I':
                    i = i.point(lambda i: i * (1 / 255))
                image = i.convert("RGB")
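The same enable-retry-restore dance around `LOAD_TRUNCATED_IMAGES` now appears both in `node_helpers.open_image` and in the EXIF-transpose fallback above. A hedged sketch of how the pattern could be factored into a context manager — a suggestion, not what the commit does:

```python
import contextlib

from PIL import ImageFile

@contextlib.contextmanager
def truncated_images_allowed():
    # Temporarily let PIL decode truncated files, restoring the previous
    # global setting even if the body raises.
    prev_value = ImageFile.LOAD_TRUNCATED_IMAGES
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    try:
        yield
    finally:
        ImageFile.LOAD_TRUNCATED_IMAGES = prev_value

# Usage sketch, mirroring the fallback above:
#     try:
#         i = ImageOps.exif_transpose(i)
#     except OSError:
#         with truncated_images_allowed():
#             i = ImageOps.exif_transpose(i)
```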

View File

@@ -14,7 +14,6 @@ from . import utils
from . import clip_vision
from . import gligen
from . import diffusers_convert
from . import model_base
from . import model_detection
from . import sd1_clip
@@ -22,9 +21,9 @@ from . import sd2_clip
from . import sdxl_clip
from . import model_patcher
from . import model_sampling
from . import lora
from .t2i_adapter import adapter
from . import supported_models_base
from .taesd import taesd
def load_model_weights(model, sd):
@@ -418,6 +417,8 @@ def load_gligen(ckpt_path):
    return model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device())

def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_clip=True, embedding_directory=None, state_dict=None, config=None):
    logging.warning("Warning: The load checkpoint with config function is deprecated and will eventually be removed, please use the other one.")
    model, clip, vae, _ = load_checkpoint_guess_config(ckpt_path, output_vae=output_vae, output_clip=output_clip, output_clipvision=False, embedding_directory=embedding_directory, output_model=True)
    #TODO: this function is a mess and should be removed eventually
    if config is None:
        with open(config_path, 'r') as stream:
@@ -425,81 +426,20 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl
    model_config_params = config['model']['params']
    clip_config = model_config_params['cond_stage_config']
    scale_factor = model_config_params['scale_factor']
    vae_config = model_config_params['first_stage_config']

    fp16 = False
    if "unet_config" in model_config_params:
        if "params" in model_config_params["unet_config"]:
            unet_config = model_config_params["unet_config"]["params"]
            if "use_fp16" in unet_config:
                fp16 = unet_config.pop("use_fp16")
                if fp16:
                    unet_config["dtype"] = torch.float16

    noise_aug_config = None
    if "noise_aug_config" in model_config_params:
        noise_aug_config = model_config_params["noise_aug_config"]

    model_type = model_base.ModelType.EPS
    if "parameterization" in model_config_params:
        if model_config_params["parameterization"] == "v":
            model_type = model_base.ModelType.V_PREDICTION
            m = model.clone()
            class ModelSamplingAdvanced(model_sampling.ModelSamplingDiscrete, model_sampling.V_PREDICTION):
                pass
            m.add_object_patch("model_sampling", ModelSamplingAdvanced(model.model.model_config))
            model = m

    clip = None
    vae = None

    layer_idx = clip_config.get("params", {}).get("layer_idx", None)
    if layer_idx is not None:
        clip.clip_layer(layer_idx)

    class WeightsLoader(torch.nn.Module):
        pass

    if state_dict is None:
        state_dict = utils.load_torch_file(ckpt_path)

    class EmptyClass:
        pass

    model_config = supported_models_base.BASE({})

    from . import latent_formats
    model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor)
    model_config.unet_config = model_detection.convert_config(unet_config)

    if config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"):
        model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type)
    else:
        model = model_base.BaseModel(model_config, model_type=model_type)

    if config['model']["target"].endswith("LatentInpaintDiffusion"):
        model.set_inpaint()

    if fp16:
        model = model.half()

    offload_device = model_management.unet_offload_device()
    model = model.to(offload_device)

    model.load_model_weights(state_dict, "model.diffusion_model.")

    if output_vae:
        vae_sd = utils.state_dict_prefix_replace(state_dict, {"first_stage_model.": ""}, filter_keys=True)
        vae = VAE(sd=vae_sd, config=vae_config)

    if output_clip:
        w = WeightsLoader()
        clip_target = EmptyClass()
        clip_target.params = clip_config.get("params", {})
        if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"):
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
            clip = CLIP(clip_target, embedding_directory=embedding_directory)
            w.cond_stage_model = clip.cond_stage_model.clip_h
        elif clip_config["target"].endswith("FrozenCLIPEmbedder"):
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer
            clip = CLIP(clip_target, embedding_directory=embedding_directory)
            w.cond_stage_model = clip.cond_stage_model.clip_l
        load_clip_weights(w, state_dict)

    return (model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae)
    return (model, clip, vae)

def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True):
    sd = utils.load_torch_file(ckpt_path)
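Since `load_checkpoint` now just delegates, callers can move to the detection-based loader directly. A sketch of the migration (the checkpoint filename is a placeholder; `comfy.sd` is assumed as the module path):

```python
from comfy import sd

# Preferred: infer the model configuration from the checkpoint weights
# instead of supplying a YAML config file.
model, clip, vae, _clipvision = sd.load_checkpoint_guess_config(
    "model.safetensors",        # placeholder checkpoint path
    output_vae=True,
    output_clip=True,
    output_clipvision=False,
    embedding_directory=None,
    output_model=True,
)
```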

View File

@@ -164,6 +164,41 @@ class MaskEditorDialog extends ComfyDialog {
        return divElement;
    }
    createOpacitySlider(self, name, callback) {
        const divElement = document.createElement('div');
        divElement.id = "maskeditor-opacity-slider";
        divElement.style.cssFloat = "left";
        divElement.style.fontFamily = "sans-serif";
        divElement.style.marginRight = "4px";
        divElement.style.color = "var(--input-text)";
        divElement.style.backgroundColor = "var(--comfy-input-bg)";
        divElement.style.borderRadius = "8px";
        divElement.style.borderColor = "var(--border-color)";
        divElement.style.borderStyle = "solid";
        divElement.style.fontSize = "15px";
        divElement.style.height = "21px";
        divElement.style.padding = "1px 6px";
        divElement.style.display = "flex";
        divElement.style.position = "relative";
        divElement.style.top = "2px";
        divElement.style.pointerEvents = "auto";
        self.opacity_slider_input = document.createElement('input');
        self.opacity_slider_input.setAttribute('type', 'range');
        self.opacity_slider_input.setAttribute('min', '0.1');
        self.opacity_slider_input.setAttribute('max', '1.0');
        self.opacity_slider_input.setAttribute('step', '0.01');
        self.opacity_slider_input.setAttribute('value', '0.7');
        const labelElement = document.createElement("label");
        labelElement.textContent = name;
        divElement.appendChild(labelElement);
        divElement.appendChild(self.opacity_slider_input);
        self.opacity_slider_input.addEventListener("input", callback);
        return divElement;
    }
    setlayout(imgCanvas, maskCanvas) {
        const self = this;
@@ -203,6 +238,13 @@ class MaskEditorDialog extends ComfyDialog {
            self.updateBrushPreview(self, null, null);
        });

        this.brush_opacity_slider = this.createOpacitySlider(self, "Opacity", (event) => {
            self.brush_opacity = event.target.value;
            if (self.brush_color_mode !== "negative") {
                self.maskCanvas.style.opacity = self.brush_opacity;
            }
        });
        this.colorButton = this.createLeftButton(this.getColorButtonText(), () => {
            if (self.brush_color_mode === "black") {
                self.brush_color_mode = "white";
@@ -237,6 +279,7 @@ class MaskEditorDialog extends ComfyDialog {
        bottom_panel.appendChild(this.saveButton);
        bottom_panel.appendChild(cancelButton);
        bottom_panel.appendChild(this.brush_size_slider);
        bottom_panel.appendChild(this.brush_opacity_slider);
        bottom_panel.appendChild(this.colorButton);

        imgCanvas.style.position = "absolute";
@@ -472,7 +515,7 @@ class MaskEditorDialog extends ComfyDialog {
        else {
            return {
                mixBlendMode: "initial",
                opacity: "0.7",
                opacity: this.brush_opacity,
            };
        }
    }
@@ -538,6 +581,7 @@ class MaskEditorDialog extends ComfyDialog {
        this.maskCtx.putImageData(maskData, 0, 0);
    }

    brush_opacity = 0.7;
    brush_size = 10;
    brush_color_mode = "black";
    drawing_mode = false;

View File

@@ -953,6 +953,12 @@ export class ComfyApp {
        const origProcessMouseDown = LGraphCanvas.prototype.processMouseDown;
        LGraphCanvas.prototype.processMouseDown = function(e) {
            // prepare for ctrl+shift drag: zoom start
            if(e.ctrlKey && e.shiftKey && e.buttons) {
                self.zoom_drag_start = [e.x, e.y, this.ds.scale];
                return;
            }

            const res = origProcessMouseDown.apply(this, arguments);

            this.selected_group_moving = false;
@@ -973,6 +979,26 @@ export class ComfyApp {
        const origProcessMouseMove = LGraphCanvas.prototype.processMouseMove;
        LGraphCanvas.prototype.processMouseMove = function(e) {
            // handle ctrl+shift drag
            if(e.ctrlKey && e.shiftKey && self.zoom_drag_start) {
                // stop canvas zoom action
                if(!e.buttons) {
                    self.zoom_drag_start = null;
                    return;
                }

                // calculate delta
                let deltaY = e.y - self.zoom_drag_start[1];
                let startScale = self.zoom_drag_start[2];
                let scale = startScale - deltaY/100;

                this.ds.changeScale(scale, [this.ds.element.width/2, this.ds.element.height/2]);
                this.graph.change();
                return;
            }

            const orig_selected_group = this.selected_group;

            if (this.selected_group && !this.selected_group_resizing && !this.selected_group_moving) {
@@ -1059,6 +1085,20 @@ export class ComfyApp {
                // Trigger onPaste
                return true;
            }

            if((e.key === '+') && e.altKey) {
                block_default = true;
                let scale = this.ds.scale * 1.1;
                this.ds.changeScale(scale, [this.ds.element.width/2, this.ds.element.height/2]);
                this.graph.change();
            }

            if((e.key === '-') && e.altKey) {
                block_default = true;
                let scale = this.ds.scale * 1 / 1.1;
                this.ds.changeScale(scale, [this.ds.element.width/2, this.ds.element.height/2]);
                this.graph.change();
            }
        }

        this.graph.change();

View File

@@ -141,6 +141,7 @@ class SplitSigmas:
                }
            }

    RETURN_TYPES = ("SIGMAS","SIGMAS")
    RETURN_NAMES = ("high_sigmas", "low_sigmas")
    CATEGORY = "sampling/custom_sampling/sigmas"

    FUNCTION = "get_sigmas"

@@ -150,6 +151,27 @@ class SplitSigmas:
        sigmas2 = sigmas[step:]
        return (sigmas1, sigmas2)
class SplitSigmasDenoise:
    @classmethod
    def INPUT_TYPES(s):
        return {"required":
                    {"sigmas": ("SIGMAS", ),
                     "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                    }
                }

    RETURN_TYPES = ("SIGMAS","SIGMAS")
    RETURN_NAMES = ("high_sigmas", "low_sigmas")
    CATEGORY = "sampling/custom_sampling/sigmas"

    FUNCTION = "get_sigmas"

    def get_sigmas(self, sigmas, denoise):
        steps = max(sigmas.shape[-1] - 1, 0)
        total_steps = round(steps * denoise)
        sigmas1 = sigmas[:-(total_steps)]
        sigmas2 = sigmas[-(total_steps + 1):]
        return (sigmas1, sigmas2)
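A worked check of the new node's slicing, as an illustrative sketch: with 21 sigmas (20 steps) and denoise=0.5, total_steps rounds to 10, so the two outputs are 11 sigmas each and share the boundary value.

```python
import torch

sigmas = torch.linspace(14.6, 0.0, 21)  # 21 sigmas == 20 steps
denoise = 0.5
steps = max(sigmas.shape[-1] - 1, 0)     # 20
total_steps = round(steps * denoise)     # 10
high = sigmas[:-(total_steps)]           # first 11 sigmas (high noise)
low = sigmas[-(total_steps + 1):]        # last 11 sigmas (low noise)
assert high.shape[-1] == 11 and low.shape[-1] == 11
assert high[-1] == low[0]                # the boundary sigma is shared
# Caveat: if denoise rounds total_steps to 0, sigmas[:-0] is an empty slice.
```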
class FlipSigmas:
    @classmethod
    def INPUT_TYPES(s):
@@ -623,6 +645,7 @@ NODE_CLASS_MAPPINGS = {
    "SamplerDPMPP_SDE": SamplerDPMPP_SDE,
    "SamplerDPMAdaptative": SamplerDPMAdaptative,
    "SplitSigmas": SplitSigmas,
    "SplitSigmasDenoise": SplitSigmasDenoise,
    "FlipSigmas": FlipSigmas,
    "CFGGuider": CFGGuider,

View File

@@ -20,8 +20,9 @@ class PatchModelAddDownscale:
    CATEGORY = "_for_testing"

    def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method):
        sigma_start = model.model.model_sampling.percent_to_sigma(start_percent)
        sigma_end = model.model.model_sampling.percent_to_sigma(end_percent)
        model_sampling = model.get_model_object("model_sampling")
        sigma_start = model_sampling.percent_to_sigma(start_percent)
        sigma_end = model_sampling.percent_to_sigma(end_percent)

        def input_block_patch(h, transformer_options):
            if transformer_options["block"][1] == block_number:
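The switch to `model.get_model_object("model_sampling")` matters because it resolves through any object patches registered on the ModelPatcher (for example by sampling-override nodes using `add_object_patch`), whereas `model.model.model_sampling` reads the raw, unpatched attribute. A simplified sketch under the assumption that `model` is the ModelPatcher the node receives; the helper name is hypothetical:

```python
def resolve_sigma_bounds(model, start_percent, end_percent):
    # model: a ComfyUI ModelPatcher. get_model_object() honors patches
    # registered with add_object_patch(), unlike model.model.model_sampling,
    # so a swapped sampling schedule (e.g. v-prediction) is reflected here.
    model_sampling = model.get_model_object("model_sampling")
    return (model_sampling.percent_to_sigma(start_percent),
            model_sampling.percent_to_sigma(end_percent))
```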