Merge branch 'Main' into feature/preview-latent

Lt.Dr.Data 2023-07-14 13:34:31 +09:00
commit dd7a19ae32
7 changed files with 387 additions and 25 deletions

comfy_extras/nodes_canny.py (new file)

@@ -0,0 +1,299 @@
# From https://github.com/kornia/kornia
import math
import torch
import torch.nn.functional as F
from torch import Tensor  # used only by the type annotations below
def get_canny_nms_kernel(device=None, dtype=None):
    """Utility function that returns 3x3 kernels for the Canny non-maximal suppression."""
    return torch.tensor(
        [
            [[[0.0, 0.0, 0.0], [0.0, 1.0, -1.0], [0.0, 0.0, 0.0]]],
            [[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, -1.0]]],
            [[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, -1.0, 0.0]]],
            [[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]]],
            [[[0.0, 0.0, 0.0], [-1.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
            [[[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
            [[[0.0, -1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
            [[[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
        ],
        device=device,
        dtype=dtype,
    )
def get_hysteresis_kernel(device=None, dtype=None):
    """Utility function that returns the 3x3 kernels for the Canny hysteresis."""
    return torch.tensor(
        [
            [[[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]]],
            [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]],
            [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]]],
            [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]],
            [[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]]],
            [[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]],
            [[[0.0, 1.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]],
            [[[0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]],
        ],
        device=device,
        dtype=dtype,
    )
def gaussian_blur_2d(img, kernel_size, sigma):
    ksize_half = (kernel_size - 1) * 0.5
    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    x_kernel = pdf / pdf.sum()
    x_kernel = x_kernel.to(device=img.device, dtype=img.dtype)
    kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :])
    kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1])
    padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2]
    img = torch.nn.functional.pad(img, padding, mode="reflect")
    img = torch.nn.functional.conv2d(img, kernel2d, groups=img.shape[-3])
    return img
def get_sobel_kernel2d(device=None, dtype=None):
    kernel_x = torch.tensor([[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]], device=device, dtype=dtype)
    kernel_y = kernel_x.transpose(0, 1)
    return torch.stack([kernel_x, kernel_y])
def spatial_gradient(input, normalized: bool = True):
    r"""Compute the first order image derivative in both x and y using a Sobel operator.
    Args:
        input: input image tensor with shape :math:`(B, C, H, W)`.
        normalized: whether the output is normalized.
    Return:
        the derivatives of the input feature map, with shape :math:`(B, C, 2, H, W)`.
    .. note::
       See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
       filtering_edges.html>`__.
    Examples:
        >>> input = torch.rand(1, 3, 4, 4)
        >>> output = spatial_gradient(input)  # 1x3x2x4x4
        >>> output.shape
        torch.Size([1, 3, 2, 4, 4])
    """
    # KORNIA_CHECK_IS_TENSOR(input)
    # KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])

    # allocate kernel
    kernel = get_sobel_kernel2d(device=input.device, dtype=input.dtype)
    if normalized:
        # normalize_kernel2d comes from the kornia helpers this file was copied from;
        # canny() below always calls spatial_gradient(..., normalized=False).
        kernel = normalize_kernel2d(kernel)

    # prepare kernel
    b, c, h, w = input.shape
    tmp_kernel = kernel[:, None, ...]

    # Pad with "replicate" for spatial dims, but with zeros for channel
    spatial_pad = [kernel.size(1) // 2, kernel.size(1) // 2, kernel.size(2) // 2, kernel.size(2) // 2]
    out_channels: int = 2
    padded_inp = torch.nn.functional.pad(input.reshape(b * c, 1, h, w), spatial_pad, 'replicate')
    out = F.conv2d(padded_inp, tmp_kernel, groups=1, padding=0, stride=1)
    return out.reshape(b, c, out_channels, h, w)
def rgb_to_grayscale(image, rgb_weights=None):
    r"""Convert an RGB image to its grayscale version.
    The image data is assumed to be in the range of (0, 1).
    Args:
        image: RGB image to be converted to grayscale with shape :math:`(*,3,H,W)`.
        rgb_weights: Weights that will be applied on each channel (RGB).
            The sum of the weights should add up to one.
    Returns:
        grayscale version of the image with shape :math:`(*,1,H,W)`.
    .. note::
       See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
       color_conversions.html>`__.
    Example:
        >>> input = torch.rand(2, 3, 4, 5)
        >>> gray = rgb_to_grayscale(input)  # 2x1x4x5
    """
    if len(image.shape) < 3 or image.shape[-3] != 3:
        raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")

    if rgb_weights is None:
        # 8 bit images
        if image.dtype == torch.uint8:
            rgb_weights = torch.tensor([76, 150, 29], device=image.device, dtype=torch.uint8)
        # floating point images
        elif image.dtype in (torch.float16, torch.float32, torch.float64):
            rgb_weights = torch.tensor([0.299, 0.587, 0.114], device=image.device, dtype=image.dtype)
        else:
            raise TypeError(f"Unknown data type: {image.dtype}")
    else:
        # is tensor that we make sure is in the same device/dtype
        rgb_weights = rgb_weights.to(image)

    # unpack the color image channels with RGB order
    r: Tensor = image[..., 0:1, :, :]
    g: Tensor = image[..., 1:2, :, :]
    b: Tensor = image[..., 2:3, :, :]

    w_r, w_g, w_b = rgb_weights.unbind()
    return w_r * r + w_g * g + w_b * b
def canny(
    input,
    low_threshold=0.1,
    high_threshold=0.2,
    kernel_size=5,
    sigma=1,
    hysteresis=True,
    eps=1e-6,
):
    r"""Find edges of the input image and filter them using the Canny algorithm.
    Args:
        input: input image tensor with shape :math:`(B,C,H,W)`.
        low_threshold: lower threshold for the hysteresis procedure.
        high_threshold: upper threshold for the hysteresis procedure.
        kernel_size: the size of the kernel for the gaussian blur.
        sigma: the standard deviation of the kernel for the gaussian blur.
        hysteresis: if True, applies the hysteresis edge tracking.
            Otherwise, the edges are divided between weak (0.5) and strong (1) edges.
        eps: regularization number to avoid NaN during backprop.
    Returns:
        - the canny edge magnitudes map, shape of :math:`(B,1,H,W)`.
        - the canny edge detection filtered by thresholds and hysteresis, shape of :math:`(B,1,H,W)`.
    .. note::
       See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/
       canny.html>`__.
    Example:
        >>> input = torch.rand(5, 3, 4, 4)
        >>> magnitude, edges = canny(input)
        >>> magnitude.shape
        torch.Size([5, 1, 4, 4])
        >>> edges.shape
        torch.Size([5, 1, 4, 4])
    """
    # KORNIA_CHECK_IS_TENSOR(input)
    # KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W'])
    # KORNIA_CHECK(
    #     low_threshold <= high_threshold,
    #     "Invalid input thresholds. low_threshold should be smaller than the high_threshold. Got: "
    #     f"{low_threshold}>{high_threshold}",
    # )
    # KORNIA_CHECK(0 < low_threshold < 1, f'Invalid low threshold. Should be in range (0, 1). Got: {low_threshold}')
    # KORNIA_CHECK(0 < high_threshold < 1, f'Invalid high threshold. Should be in range (0, 1). Got: {high_threshold}')

    device = input.device
    dtype = input.dtype

    # To Grayscale
    if input.shape[1] == 3:
        input = rgb_to_grayscale(input)

    # Gaussian filter
    blurred: Tensor = gaussian_blur_2d(input, kernel_size, sigma)

    # Compute the gradients
    gradients: Tensor = spatial_gradient(blurred, normalized=False)

    # Unpack the edges
    gx: Tensor = gradients[:, :, 0]
    gy: Tensor = gradients[:, :, 1]

    # Compute gradient magnitude and angle
    magnitude: Tensor = torch.sqrt(gx * gx + gy * gy + eps)
    angle: Tensor = torch.atan2(gy, gx)

    # Radians to Degrees
    angle = 180.0 * angle / math.pi

    # Round angle to the nearest 45 degree
    angle = torch.round(angle / 45) * 45

    # Non-maximal suppression
    nms_kernels: Tensor = get_canny_nms_kernel(device, dtype)
    nms_magnitude: Tensor = F.conv2d(magnitude, nms_kernels, padding=nms_kernels.shape[-1] // 2)

    # Get the indices for both directions
    positive_idx: Tensor = (angle / 45) % 8
    positive_idx = positive_idx.long()
    negative_idx: Tensor = ((angle / 45) + 4) % 8
    negative_idx = negative_idx.long()

    # Apply the non-maximum suppression to the different directions
    channel_select_filtered_positive: Tensor = torch.gather(nms_magnitude, 1, positive_idx)
    channel_select_filtered_negative: Tensor = torch.gather(nms_magnitude, 1, negative_idx)
    channel_select_filtered: Tensor = torch.stack(
        [channel_select_filtered_positive, channel_select_filtered_negative], 1
    )
    is_max: Tensor = channel_select_filtered.min(dim=1)[0] > 0.0
    magnitude = magnitude * is_max

    # Threshold
    edges: Tensor = F.threshold(magnitude, low_threshold, 0.0)
    low: Tensor = magnitude > low_threshold
    high: Tensor = magnitude > high_threshold
    edges = low * 0.5 + high * 0.5
    edges = edges.to(dtype)

    # Hysteresis
    if hysteresis:
        edges_old: Tensor = -torch.ones(edges.shape, device=edges.device, dtype=dtype)
        hysteresis_kernels: Tensor = get_hysteresis_kernel(device, dtype)

        while ((edges_old - edges).abs() != 0).any():
            weak: Tensor = (edges == 0.5).float()
            strong: Tensor = (edges == 1).float()

            hysteresis_magnitude: Tensor = F.conv2d(
                edges, hysteresis_kernels, padding=hysteresis_kernels.shape[-1] // 2
            )
            hysteresis_magnitude = (hysteresis_magnitude == 1).any(1, keepdim=True).to(dtype)
            hysteresis_magnitude = hysteresis_magnitude * weak + strong

            edges_old = edges.clone()
            edges = hysteresis_magnitude + (hysteresis_magnitude == 0) * weak * 0.5

        edges = hysteresis_magnitude

    return magnitude, edges
class Canny:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"image": ("IMAGE",),
                             "low_threshold": ("FLOAT", {"default": 0.4, "min": 0.01, "max": 0.99, "step": 0.01}),
                             "high_threshold": ("FLOAT", {"default": 0.8, "min": 0.01, "max": 0.99, "step": 0.01})
                             }}

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "detect_edge"

    CATEGORY = "image/preprocessors"

    def detect_edge(self, image, low_threshold, high_threshold):
        output = canny(image.movedim(-1, 1), low_threshold, high_threshold)
        img_out = output[1].repeat(1, 3, 1, 1).movedim(1, -1)
        return (img_out,)


NODE_CLASS_MAPPINGS = {
    "Canny": Canny,
}
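
For reference, the new node can be exercised outside the graph editor. The snippet below is a minimal sketch and not part of the commit: it assumes the file above is importable as comfy_extras.nodes_canny and that the input follows ComfyUI's IMAGE convention (a float tensor of shape (batch, height, width, channels) with values in [0, 1]); the random tensor only stands in for a real image batch.

import torch

from comfy_extras.nodes_canny import Canny

image = torch.rand(1, 64, 64, 3)  # stand-in for an IMAGE batch in [0, 1]
node = Canny()
(edges,) = node.detect_edge(image, low_threshold=0.4, high_threshold=0.8)
print(edges.shape)  # torch.Size([1, 64, 64, 3]); the single-channel edge map is replicated to 3 channels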

main.py

@@ -1,22 +1,24 @@
 import os
 import importlib.util
 import folder_paths
+import time

 def execute_prestartup_script():
     def execute_script(script_path):
-        if os.path.exists(script_path):
-            module_name = os.path.splitext(script_path)[0]
-            try:
-                spec = importlib.util.spec_from_file_location(module_name, script_path)
-                module = importlib.util.module_from_spec(spec)
-                spec.loader.exec_module(module)
-            except Exception as e:
-                print(f"Failed to execute startup-script: {script_path} / {e}")
+        module_name = os.path.splitext(script_path)[0]
+        try:
+            spec = importlib.util.spec_from_file_location(module_name, script_path)
+            module = importlib.util.module_from_spec(spec)
+            spec.loader.exec_module(module)
+            return True
+        except Exception as e:
+            print(f"Failed to execute startup-script: {script_path} / {e}")
+        return False

     node_paths = folder_paths.get_folder_paths("custom_nodes")
     for custom_node_path in node_paths:
         possible_modules = os.listdir(custom_node_path)
+        node_prestartup_times = []

         for possible_module in possible_modules:
             module_path = os.path.join(custom_node_path, possible_module)

@@ -24,8 +26,19 @@ def execute_prestartup_script():
                 continue

             script_path = os.path.join(module_path, "prestartup_script.py")
-            execute_script(script_path)
+            if os.path.exists(script_path):
+                time_before = time.perf_counter()
+                success = execute_script(script_path)
+                node_prestartup_times.append((time.perf_counter() - time_before, module_path, success))
+    if len(node_prestartup_times) > 0:
+        print("\nPrestartup times for custom nodes:")
+        for n in sorted(node_prestartup_times):
+            if n[2]:
+                import_message = ""
+            else:
+                import_message = " (PRESTARTUP FAILED)"
+            print("{:6.1f} seconds{}:".format(n[0], import_message), n[1])
+        print()

 execute_prestartup_script()

@@ -36,7 +49,6 @@ import itertools
 import shutil
 import threading
 import gc
-import time

 from comfy.cli_args import args
 import comfy.utils
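
The timing loop above only fires for directories under custom_nodes that contain a prestartup_script.py. As a hypothetical illustration (the package and path names below are made up, not part of the commit), a file like this would now appear in the "Prestartup times for custom nodes" report:

# custom_nodes/my_node_pack/prestartup_script.py (hypothetical example)
# Runs before ComfyUI finishes starting; execute_prestartup_script() reports its
# wall-clock time, and an exception here is flagged as "(PRESTARTUP FAILED)".
print("my_node_pack: preparing environment before ComfyUI starts")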

nodes.py

@@ -9,7 +9,7 @@ import math
 import time
 import random

-from PIL import Image, ImageDraw, ImageOps
+from PIL import Image, ImageOps
 from PIL.PngImagePlugin import PngInfo
 import numpy as np
 import safetensors.torch

@@ -116,7 +116,7 @@ class ConditioningConcat:
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "concat"

-    CATEGORY = "advanced/conditioning"
+    CATEGORY = "conditioning"

     def concat(self, conditioning_to, conditioning_from):
         out = []

@@ -1531,6 +1531,7 @@ NODE_CLASS_MAPPINGS = {
     "ImagePadForOutpaint": ImagePadForOutpaint,
     "ConditioningAverage ": ConditioningAverage ,
     "ConditioningCombine": ConditioningCombine,
+    "ConditioningConcat": ConditioningConcat,
     "ConditioningSetArea": ConditioningSetArea,
     "ConditioningSetMask": ConditioningSetMask,
     "KSamplerAdvanced": KSamplerAdvanced,

@@ -1564,7 +1565,6 @@ NODE_CLASS_MAPPINGS = {
     "SaveLatent": SaveLatent,
     "ConditioningZeroOut": ConditioningZeroOut,
-    "ConditioningConcat": ConditioningConcat,
     "SavePreviewLatent": SavePreviewLatent,
 }

@@ -1590,6 +1590,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "CLIPSetLastLayer": "CLIP Set Last Layer",
     "ConditioningCombine": "Conditioning (Combine)",
     "ConditioningAverage ": "Conditioning (Average)",
+    "ConditioningConcat": "Conditioning (Concat)",
     "ConditioningSetArea": "Conditioning (Set Area)",
     "ConditioningSetMask": "Conditioning (Set Mask)",
     "ControlNetApply": "Apply ControlNet",

@@ -1622,7 +1623,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "VAEEncodeTiled": "VAE Encode (Tiled)",
 }

-def load_custom_node(module_path):
+def load_custom_node(module_path, ignore=set()):
     module_name = os.path.basename(module_path)
     if os.path.isfile(module_path):
         sp = os.path.splitext(module_path)

@@ -1636,7 +1637,9 @@ def load_custom_node(module_path):
         sys.modules[module_name] = module
         module_spec.loader.exec_module(module)
         if hasattr(module, "NODE_CLASS_MAPPINGS") and getattr(module, "NODE_CLASS_MAPPINGS") is not None:
-            NODE_CLASS_MAPPINGS.update(module.NODE_CLASS_MAPPINGS)
+            for name in module.NODE_CLASS_MAPPINGS:
+                if name not in ignore:
+                    NODE_CLASS_MAPPINGS[name] = module.NODE_CLASS_MAPPINGS[name]
             if hasattr(module, "NODE_DISPLAY_NAME_MAPPINGS") and getattr(module, "NODE_DISPLAY_NAME_MAPPINGS") is not None:
                 NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS)
             return True

@@ -1649,6 +1652,7 @@ def load_custom_node(module_path):
         return False

 def load_custom_nodes():
+    base_node_names = set(NODE_CLASS_MAPPINGS.keys())
     node_paths = folder_paths.get_folder_paths("custom_nodes")
     node_import_times = []
     for custom_node_path in node_paths:

@@ -1661,7 +1665,7 @@ def load_custom_nodes():
             if os.path.isfile(module_path) and os.path.splitext(module_path)[1] != ".py": continue
             if module_path.endswith(".disabled"): continue
             time_before = time.perf_counter()
-            success = load_custom_node(module_path)
+            success = load_custom_node(module_path, base_node_names)
             node_import_times.append((time.perf_counter() - time_before, module_path, success))

     if len(node_import_times) > 0:

@@ -1683,4 +1687,5 @@ def init_custom_nodes():
     load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_model_merging.py"))
     load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_tomesd.py"))
     load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_clip_sdxl.py"))
+    load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_canny.py"))
     load_custom_nodes()
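
The new ignore parameter means a custom node pack can no longer silently replace a built-in class: load_custom_nodes() snapshots the built-in names into base_node_names and passes them as the ignore set. A hypothetical custom_nodes package (the names below are made up for illustration) would behave like this:

# custom_nodes/my_pack/__init__.py (hypothetical example)
class MyCanny:
    pass

NODE_CLASS_MAPPINGS = {
    "Canny": MyCanny,    # collides with a built-in name -> skipped because of the ignore set
    "MyCanny": MyCanny,  # new name -> registered as usual
}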

server.py

@@ -444,7 +444,8 @@ class PromptServer():
                 prompt_id = str(uuid.uuid4())
                 outputs_to_execute = valid[2]
                 self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute))
-                return web.json_response({"prompt_id": prompt_id, "number": number})
+                response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
+                return web.json_response(response)
             else:
                 print("invalid prompt:", valid[1])
                 return web.json_response({"error": valid[1], "node_errors": valid[3]}, status=400)

web/scripts/api.js

@@ -202,6 +202,8 @@ class ComfyApi extends EventTarget {
                response: await res.json(),
            };
        }
+
+        return await res.json();
    }

    /**

web/scripts/app.js

@@ -836,7 +836,7 @@ export class ComfyApp {
        LGraphCanvas.prototype.drawNodeShape = function (node, ctx, size, fgcolor, bgcolor, selected, mouse_over) {
            const res = origDrawNodeShape.apply(this, arguments);

-           const nodeErrors = self.lastPromptError?.node_errors[node.id];
+           const nodeErrors = self.lastNodeErrors?.[node.id];

            let color = null;
            let lineWidth = 1;

@@ -845,7 +845,7 @@ export class ComfyApp {
            } else if (self.dragOverNode && node.id === self.dragOverNode.id) {
                color = "dodgerblue";
            }
-           else if (self.lastPromptError != null && nodeErrors?.errors) {
+           else if (nodeErrors?.errors) {
                color = "red";
                lineWidth = 2;
            }

@@ -1413,7 +1413,7 @@ export class ComfyApp {
        }

        this.#processingQueue = true;
-       this.lastPromptError = null;
+       this.lastNodeErrors = null;

        try {
            while (this.#queueItems.length) {

@@ -1423,12 +1423,16 @@ export class ComfyApp {
                    const p = await this.graphToPrompt();

                    try {
-                       await api.queuePrompt(number, p);
+                       const res = await api.queuePrompt(number, p);
+                       this.lastNodeErrors = res.node_errors;
+                       if (this.lastNodeErrors.length > 0) {
+                           this.canvas.draw(true, true);
+                       }
                    } catch (error) {
                        const formattedError = this.#formatPromptError(error)
                        this.ui.dialog.show(formattedError);
                        if (error.response) {
-                           this.lastPromptError = error.response;
+                           this.lastNodeErrors = error.response.node_errors;
                            this.canvas.draw(true, true);
                        }
                        break;

@@ -1534,7 +1538,7 @@ export class ComfyApp {
    clean() {
        this.nodeOutputs = {};
        this.nodePreviewImages = {}
-       this.lastPromptError = null;
+       this.lastNodeErrors = null;
        this.lastExecutionError = null;
        this.runningNodeId = null;
    }

web/scripts/ui.js

@@ -670,6 +670,37 @@ export class ComfyUI {
                    }, 0);
                },
            }),
+           $el("button", {
+               id: "comfy-dev-save-api-button",
+               textContent: "Save (API Format)",
+               style: {width: "100%", display: "none"},
+               onclick: () => {
+                   let filename = "workflow_api.json";
+                   if (promptFilename.value) {
+                       filename = prompt("Save workflow (API) as:", filename);
+                       if (!filename) return;
+                       if (!filename.toLowerCase().endsWith(".json")) {
+                           filename += ".json";
+                       }
+                   }
+                   app.graphToPrompt().then(p=>{
+                       const json = JSON.stringify(p.output, null, 2); // convert the data to a JSON string
+                       const blob = new Blob([json], {type: "application/json"});
+                       const url = URL.createObjectURL(blob);
+                       const a = $el("a", {
+                           href: url,
+                           download: filename,
+                           style: {display: "none"},
+                           parent: document.body,
+                       });
+                       a.click();
+                       setTimeout(function () {
+                           a.remove();
+                           window.URL.revokeObjectURL(url);
+                       }, 0);
+                   });
+               },
+           }),
            $el("button", {id: "comfy-load-button", textContent: "Load", onclick: () => fileInput.click()}),
            $el("button", {
                id: "comfy-refresh-button",

@@ -694,6 +725,14 @@ export class ComfyUI {
            }),
        ]);

+       const devMode = this.settings.addSetting({
+           id: "Comfy.DevMode",
+           name: "Enable Dev mode Options",
+           type: "boolean",
+           defaultValue: false,
+           onChange: function(value) { document.getElementById("comfy-dev-save-api-button").style.display = value ? "block" : "none"},
+       });
+
        dragElement(this.menuContainer, this.settings);

        this.setStatus({exec_info: {queue_remaining: "X"}});