diff --git a/README.md b/README.md
index baa8cf8b6..d83b4bdac 100644
--- a/README.md
+++ b/README.md
@@ -77,9 +77,9 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you
 
 See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor.
 
-## Colab Notebook
+## Jupyter Notebook
 
-To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
+To run it on services like paperspace, kaggle or colab you can use my [Jupyter Notebook](notebooks/comfyui_colab.ipynb)
 
 ## Manual Install (Windows, Linux)
 
diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py
index daaa2f2bf..9b95ae003 100644
--- a/comfy/clip_vision.py
+++ b/comfy/clip_vision.py
@@ -49,12 +49,16 @@ class ClipVisionModel():
             precision_scope = lambda a, b: contextlib.nullcontext(a)
 
         with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32):
-            outputs = self.model(pixel_values=pixel_values)
+            outputs = self.model(pixel_values=pixel_values, output_hidden_states=True)
 
         for k in outputs:
             t = outputs[k]
             if t is not None:
-                outputs[k] = t.cpu()
+                if k == 'hidden_states':
+                    outputs["penultimate_hidden_states"] = t[-2].cpu()
+                else:
+                    outputs[k] = t.cpu()
+
         return outputs
 
 def convert_to_transformers(sd, prefix):
diff --git a/comfy/controlnet.py b/comfy/controlnet.py
index 490be6bbc..af0df103e 100644
--- a/comfy/controlnet.py
+++ b/comfy/controlnet.py
@@ -449,10 +449,18 @@ class T2IAdapter(ControlBase):
         return c
 
 def load_t2i_adapter(t2i_data):
-    keys = t2i_data.keys()
-    if 'adapter' in keys:
+    if 'adapter' in t2i_data:
         t2i_data = t2i_data['adapter']
-        keys = t2i_data.keys()
+    if 'adapter.body.0.resnets.0.block1.weight' in t2i_data: #diffusers format
+        prefix_replace = {}
+        for i in range(4):
+            for j in range(2):
+                prefix_replace["adapter.body.{}.resnets.{}.".format(i, j)] = "body.{}.".format(i * 2 + j)
+            prefix_replace["adapter.body.{}.".format(i, j)] = "body.{}.".format(i * 2)
+        prefix_replace["adapter."] = ""
+        t2i_data = comfy.utils.state_dict_prefix_replace(t2i_data, prefix_replace)
+    keys = t2i_data.keys()
+
     if "body.0.in_conv.weight" in keys:
         cin = t2i_data['body.0.in_conv.weight'].shape[1]
         model_ad = comfy.t2i_adapter.adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 9fdfbd217..34484b288 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -323,8 +323,7 @@ class CrossAttentionDoggettx(nn.Module):
                 break
             except model_management.OOM_EXCEPTION as e:
                 if first_op_done == False:
-                    torch.cuda.empty_cache()
-                    torch.cuda.ipc_collect()
+                    model_management.soft_empty_cache(True)
                     if cleared_cache == False:
                         cleared_cache = True
                         print("out of memory error, emptying cache and trying again")
diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 431548483..5f38640c3 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -186,6 +186,7 @@ def slice_attention(q, k, v):
                 del s2
             break
         except model_management.OOM_EXCEPTION as e:
+            model_management.soft_empty_cache(True)
             steps *= 2
             if steps > 128:
                 raise e
diff --git a/comfy/model_management.py b/comfy/model_management.py
index aca8af999..b663e8f59 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -58,8 +58,15 @@ except:
 if args.cpu:
     cpu_state = CPUState.CPU
 
-def get_torch_device():
+def is_intel_xpu():
+    global cpu_state
     global xpu_available
+    if cpu_state == CPUState.GPU:
+        if xpu_available:
+            return True
+    return False
+
+def get_torch_device():
     global directml_enabled
     global cpu_state
     if directml_enabled:
@@ -70,13 +77,12 @@ def get_torch_device():
     if cpu_state == CPUState.CPU:
         return torch.device("cpu")
     else:
-        if xpu_available:
+        if is_intel_xpu():
             return torch.device("xpu")
         else:
             return torch.device(torch.cuda.current_device())
 
 def get_total_memory(dev=None, torch_total_too=False):
-    global xpu_available
     global directml_enabled
     if dev is None:
         dev = get_torch_device()
@@ -88,7 +94,7 @@ def get_total_memory(dev=None, torch_total_too=False):
         if directml_enabled:
             mem_total = 1024 * 1024 * 1024 #TODO
             mem_total_torch = mem_total
-        elif xpu_available:
+        elif is_intel_xpu():
             stats = torch.xpu.memory_stats(dev)
             mem_reserved = stats['reserved_bytes.all.current']
             mem_total = torch.xpu.get_device_properties(dev).total_memory
@@ -146,11 +152,11 @@ def is_nvidia():
     if cpu_state == CPUState.GPU:
         if torch.version.cuda:
             return True
+    return False
 
 ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention
 
 VAE_DTYPE = torch.float32
-
 try:
     if is_nvidia():
         torch_version = torch.version.__version__
@@ -162,6 +168,9 @@ try:
 except:
     pass
 
+if is_intel_xpu():
+    VAE_DTYPE = torch.bfloat16
+
 if args.fp16_vae:
     VAE_DTYPE = torch.float16
 elif args.bf16_vae:
@@ -220,7 +229,6 @@ if DISABLE_SMART_MEMORY:
     print("Disabling smart memory management")
 
 def get_torch_device_name(device):
-    global xpu_available
     if hasattr(device, 'type'):
         if device.type == "cuda":
             try:
@@ -230,7 +238,7 @@ def get_torch_device_name(device):
             return "{} {} : {}".format(device, torch.cuda.get_device_name(device), allocator_backend)
         else:
             return "{}".format(device.type)
-    elif xpu_available:
+    elif is_intel_xpu():
         return "{} {}".format(device, torch.xpu.get_device_name(device))
     else:
         return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device))
@@ -260,7 +268,6 @@ class LoadedModel:
         return self.model_memory()
 
     def model_load(self, lowvram_model_memory=0):
-        global xpu_available
         patch_model_to = None
         if lowvram_model_memory == 0:
             patch_model_to = self.device
@@ -281,7 +288,7 @@ class LoadedModel:
             accelerate.dispatch_model(self.real_model, device_map=device_map, main_device=self.device)
             self.model_accelerated = True
 
-        if xpu_available and not args.disable_ipex_optimize:
+        if is_intel_xpu() and not args.disable_ipex_optimize:
             self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True)
 
         return self.real_model
@@ -471,12 +478,11 @@ def get_autocast_device(dev):
 
 
 def xformers_enabled():
-    global xpu_available
     global directml_enabled
     global cpu_state
     if cpu_state != CPUState.GPU:
         return False
-    if xpu_available:
+    if is_intel_xpu():
         return False
     if directml_enabled:
         return False
@@ -503,7 +509,6 @@ def pytorch_attention_flash_attention():
     return False
 
 def get_free_memory(dev=None, torch_free_too=False):
-    global xpu_available
     global directml_enabled
     if dev is None:
         dev = get_torch_device()
@@ -515,7 +520,7 @@ def get_free_memory(dev=None, torch_free_too=False):
         if directml_enabled:
             mem_free_total = 1024 * 1024 * 1024 #TODO
             mem_free_torch = mem_free_total
-        elif xpu_available:
+        elif is_intel_xpu():
             stats = torch.xpu.memory_stats(dev)
             mem_active = stats['active_bytes.all.current']
             mem_allocated = stats['allocated_bytes.all.current']
@@ -577,7 +582,6 @@ def is_device_mps(device):
     return False
 
 def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
-    global xpu_available
     global directml_enabled
 
     if device is not None:
@@ -600,7 +604,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
     if cpu_mode() or mps_mode():
         return False #TODO ?
 
-    if xpu_available:
+    if is_intel_xpu():
         return True
 
     if torch.cuda.is_bf16_supported():
@@ -635,15 +639,14 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True):
 
     return True
 
-def soft_empty_cache():
-    global xpu_available
+def soft_empty_cache(force=False):
     global cpu_state
     if cpu_state == CPUState.MPS:
         torch.mps.empty_cache()
-    elif xpu_available:
+    elif is_intel_xpu():
         torch.xpu.empty_cache()
     elif torch.cuda.is_available():
-        if is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
+        if force or is_nvidia(): #This seems to make things worse on ROCm so I only do it for cuda
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
 
diff --git a/comfy/samplers.py b/comfy/samplers.py
index 103ac33ff..c60288fd1 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -263,8 +263,6 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
             output = model_function(input_x, timestep_, **c).chunk(batch_chunks)
             del input_x
 
-            model_management.throw_exception_if_processing_interrupted()
-
             for o in range(batch_chunks):
                 if cond_or_uncond[o] == COND:
                     out_cond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o]
@@ -390,11 +388,20 @@ def get_mask_aabb(masks):
 
     return bounding_boxes, is_empty
 
-def resolve_cond_masks(conditions, h, w, device):
+def resolve_areas_and_cond_masks(conditions, h, w, device):
     # We need to decide on an area outside the sampling loop in order to properly generate opposite areas of equal sizes.
     # While we're doing this, we can also resolve the mask device and scaling for performance reasons
     for i in range(len(conditions)):
         c = conditions[i]
+        if 'area' in c[1]:
+            area = c[1]['area']
+            if area[0] == "percentage":
+                modified = c[1].copy()
+                area = (max(1, round(area[1] * h)), max(1, round(area[2] * w)), round(area[3] * h), round(area[4] * w))
+                modified['area'] = area
+                c = [c[0], modified]
+                conditions[i] = c
+
         if 'mask' in c[1]:
             mask = c[1]['mask']
             mask = mask.to(device=device)
@@ -622,8 +629,8 @@ class KSampler:
         positive = positive[:]
         negative = negative[:]
 
-        resolve_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
-        resolve_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
+        resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
+        resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
 
         calculate_start_end_timesteps(self.model_wrap, negative)
         calculate_start_end_timesteps(self.model_wrap, positive)
diff --git a/comfy_extras/chainner_models/architecture/SwinIR.py b/comfy_extras/chainner_models/architecture/SwinIR.py
index 1abf450bb..439dcbcb2 100644
--- a/comfy_extras/chainner_models/architecture/SwinIR.py
+++ b/comfy_extras/chainner_models/architecture/SwinIR.py
@@ -846,6 +846,7 @@ class SwinIR(nn.Module):
         num_in_ch = in_chans
         num_out_ch = in_chans
         supports_fp16 = True
+        self.start_unshuffle = 1
 
         self.model_arch = "SwinIR"
         self.sub_type = "SR"
@@ -874,6 +875,11 @@ class SwinIR(nn.Module):
             else 64
         )
 
+        if "conv_first.1.weight" in self.state:
+            self.state["conv_first.weight"] = self.state.pop("conv_first.1.weight")
+            self.state["conv_first.bias"] = self.state.pop("conv_first.1.bias")
+            self.start_unshuffle = round(math.sqrt(self.state["conv_first.weight"].shape[1] // 3))
+
         num_in_ch = self.state["conv_first.weight"].shape[1]
         in_chans = num_in_ch
         if "conv_last.weight" in state_keys:
@@ -968,7 +974,7 @@ class SwinIR(nn.Module):
         self.depths = depths
         self.window_size = window_size
         self.mlp_ratio = mlp_ratio
-        self.scale = upscale
+        self.scale = upscale / self.start_unshuffle
         self.upsampler = upsampler
         self.img_size = img_size
         self.img_range = img_range
@@ -1101,6 +1107,9 @@ class SwinIR(nn.Module):
             self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             if self.upscale == 4:
                 self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+            elif self.upscale == 8:
+                self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+                self.conv_up3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
             self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
@@ -1157,6 +1166,9 @@ class SwinIR(nn.Module):
             self.mean = self.mean.type_as(x)
         x = (x - self.mean) * self.img_range
 
+        if self.start_unshuffle > 1:
+            x = torch.nn.functional.pixel_unshuffle(x, self.start_unshuffle)
+
         if self.upsampler == "pixelshuffle":
             # for classical SR
             x = self.conv_first(x)
@@ -1186,6 +1198,9 @@ class SwinIR(nn.Module):
                         )
                     )
                 )
+            elif self.upscale == 8:
+                x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
+                x = self.lrelu(self.conv_up3(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
             x = self.conv_last(self.lrelu(self.conv_hr(x)))
         else:
             # for image denoising and JPEG compression artifact reduction
diff --git a/comfy_extras/nodes_upscale_model.py b/comfy_extras/nodes_upscale_model.py
index abd182e6e..2b5e49a55 100644
--- a/comfy_extras/nodes_upscale_model.py
+++ b/comfy_extras/nodes_upscale_model.py
@@ -18,6 +18,8 @@ class UpscaleModelLoader:
     def load_model(self, model_name):
         model_path = folder_paths.get_full_path("upscale_models", model_name)
         sd = comfy.utils.load_torch_file(model_path, safe_load=True)
+        if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
+            sd = comfy.utils.state_dict_prefix_replace(sd, {"module.":""})
         out = model_loading.load_state_dict(sd).eval()
         return (out, )
 
diff --git a/execution.py b/execution.py
index e10fdbb60..5f5d6c738 100644
--- a/execution.py
+++ b/execution.py
@@ -21,7 +21,8 @@ def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_da
             input_unique_id = input_data[0]
             output_index = input_data[1]
             if input_unique_id not in outputs:
-                return None
+                input_data_all[x] = (None,)
+                continue
             obj = outputs[input_unique_id][output_index]
             input_data_all[x] = obj
         else:
diff --git a/folder_paths.py b/folder_paths.py
index e321690dd..4a10c68e7 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -1,14 +1,13 @@
 import os
 import time
 
-supported_ckpt_extensions = set(['.ckpt', '.pth', '.safetensors'])
 supported_pt_extensions = set(['.ckpt', '.pt', '.bin', '.pth', '.safetensors'])
 
 folder_names_and_paths = {}
 
 base_path = os.path.dirname(os.path.realpath(__file__))
 models_dir = os.path.join(base_path, "models")
-folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_ckpt_extensions)
+folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_pt_extensions)
 folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
 
 folder_names_and_paths["loras"] = ([os.path.join(models_dir, "loras")], supported_pt_extensions)
@@ -121,17 +120,22 @@ def add_model_folder_path(folder_name, full_folder_path):
 def get_folder_paths(folder_name):
     return folder_names_and_paths[folder_name][0][:]
 
-def recursive_search(directory):
+def recursive_search(directory, excluded_dir_names=None):
     if not os.path.isdir(directory):
         return [], {}
+
+    if excluded_dir_names is None:
+        excluded_dir_names = []
+
     result = []
     dirs = {directory: os.path.getmtime(directory)}
-    for root, subdir, file in os.walk(directory, followlinks=True):
-        for filepath in file:
-            #we os.path,join directory with a blank string to generate a path separator at the end.
-            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
-        for d in subdir:
-            path = os.path.join(root, d)
+    for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
+        subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
+        for file_name in filenames:
+            relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
+            result.append(relative_path)
+        for d in subdirs:
+            path = os.path.join(dirpath, d)
             dirs[path] = os.path.getmtime(path)
     return result, dirs
 
@@ -159,7 +163,7 @@ def get_filename_list_(folder_name):
     folders = folder_names_and_paths[folder_name]
     output_folders = {}
     for x in folders[0]:
-        files, folders_all = recursive_search(x)
+        files, folders_all = recursive_search(x, excluded_dir_names=[".git"])
         output_list.update(filter_files_extensions(files, folders[1]))
         output_folders = {**output_folders, **folders_all}
 
diff --git a/main.py b/main.py
index a4038db4b..9f0f80458 100644
--- a/main.py
+++ b/main.py
@@ -104,6 +104,7 @@ async def run(server, address='', port=8188, verbose=True, call_on_start=None):
 
 def hijack_progress(server):
     def hook(value, total, preview_image):
+        comfy.model_management.throw_exception_if_processing_interrupted()
         server.send_sync("progress", {"value": value, "max": total}, server.client_id)
         if preview_image is not None:
             server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)
diff --git a/nodes.py b/nodes.py
index fa26e5939..77d180526 100644
--- a/nodes.py
+++ b/nodes.py
@@ -159,6 +159,31 @@ class ConditioningSetArea:
             c.append(n)
         return (c, )
 
+class ConditioningSetAreaPercentage:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning": ("CONDITIONING", ),
+                             "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "append"
+
+    CATEGORY = "conditioning"
+
+    def append(self, conditioning, width, height, x, y, strength):
+        c = []
+        for t in conditioning:
+            n = [t[0], t[1].copy()]
+            n[1]['area'] = ("percentage", height, width, y, x)
+            n[1]['strength'] = strength
+            n[1]['set_area_to_bounds'] = False
+            c.append(n)
+        return (c, )
+
 class ConditioningSetMask:
     @classmethod
     def INPUT_TYPES(s):
@@ -1583,6 +1608,7 @@ NODE_CLASS_MAPPINGS = {
     "ConditioningCombine": ConditioningCombine,
     "ConditioningConcat": ConditioningConcat,
     "ConditioningSetArea": ConditioningSetArea,
+    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
     "ConditioningSetMask": ConditioningSetMask,
     "KSamplerAdvanced": KSamplerAdvanced,
     "SetLatentNoiseMask": SetLatentNoiseMask,
@@ -1644,6 +1670,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "ConditioningAverage ": "Conditioning (Average)",
     "ConditioningConcat": "Conditioning (Concat)",
     "ConditioningSetArea": "Conditioning (Set Area)",
+    "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
     "ConditioningSetMask": "Conditioning (Set Mask)",
     "ControlNetApply": "Apply ControlNet",
     "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",
diff --git a/server.py b/server.py
index 57d5a65df..d04060499 100644
--- a/server.py
+++ b/server.py
@@ -170,15 +170,15 @@ class PromptServer():
             subfolder = post.get("subfolder", "")
             full_output_folder = os.path.join(upload_dir, os.path.normpath(subfolder))
+            filepath = os.path.abspath(os.path.join(full_output_folder, filename))
 
-            if os.path.commonpath((upload_dir, os.path.abspath(full_output_folder))) != upload_dir:
+            if os.path.commonpath((upload_dir, filepath)) != upload_dir:
                 return web.Response(status=400)
 
             if not os.path.exists(full_output_folder):
                 os.makedirs(full_output_folder)
 
             split = os.path.splitext(filename)
-            filepath = os.path.join(full_output_folder, filename)
 
             if overwrite is not None and (overwrite == "true" or overwrite == "1"):
                 pass
@@ -398,7 +398,7 @@ class PromptServer():
             info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']
             info['name'] = node_class
             info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[node_class] if node_class in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else node_class
-            info['description'] = ''
+            info['description'] = obj_class.DESCRIPTION if hasattr(obj_class,'DESCRIPTION') else ''
             info['category'] = 'sd'
             if hasattr(obj_class, 'OUTPUT_NODE') and obj_class.OUTPUT_NODE == True:
                 info['output_node'] = True
@@ -603,7 +603,7 @@ class PromptServer():
             await self.send(*msg)
 
     async def start(self, address, port, verbose=True, call_on_start=None):
-        runner = web.AppRunner(self.app)
+        runner = web.AppRunner(self.app, access_log=None)
         await runner.setup()
         site = web.TCPSite(runner, address, port)
         await site.start()
diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js
index f9a5b7278..606605f0a 100644
--- a/web/extensions/core/widgetInputs.js
+++ b/web/extensions/core/widgetInputs.js
@@ -142,7 +142,7 @@ app.registerExtension({
 			const r = origOnNodeCreated ? origOnNodeCreated.apply(this) : undefined;
 			if (this.widgets) {
 				for (const w of this.widgets) {
-					if (w?.options?.forceInput) {
+					if (w?.options?.forceInput || w?.options?.defaultInput) {
 						const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}];
 						convertToInput(this, w, config);
 					}
diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js
index 4bb2f0d99..4a21a1b34 100644
--- a/web/lib/litegraph.core.js
+++ b/web/lib/litegraph.core.js
@@ -11529,7 +11529,7 @@ LGraphNode.prototype.executeAction = function(action)
                 if (timeout) {
                     clearInterval(timeout);
                 }
-                timeout = setTimeout(refreshHelper, 250);
+                timeout = setTimeout(refreshHelper, 10);
                 return;
             }
             e.preventDefault();
diff --git a/web/scripts/app.js b/web/scripts/app.js
index 3b7483cdf..9db4e9230 100644
--- a/web/scripts/app.js
+++ b/web/scripts/app.js
@@ -667,11 +667,40 @@ export class ComfyApp {
 	}
 
 	/**
-	 * Adds a handler on paste that extracts and loads workflows from pasted JSON data
+	 * Adds a handler on paste that extracts and loads images or workflows from pasted JSON data
 	 */
 	#addPasteHandler() {
 		document.addEventListener("paste", (e) => {
-			let data = (e.clipboardData || window.clipboardData).getData("text/plain");
+			let data = (e.clipboardData || window.clipboardData);
+			const items = data.items;
+
+			// Look for image paste data
+			for (const item of items) {
+				if (item.type.startsWith('image/')) {
+					var imageNode = null;
+
+					// If an image node is selected, paste into it
+					if (this.canvas.current_node &&
+						this.canvas.current_node.is_selected &&
+						ComfyApp.isImageNode(this.canvas.current_node)) {
+						imageNode = this.canvas.current_node;
+					}
+
+					// No image node selected: add a new one
+					if (!imageNode) {
+						const newNode = LiteGraph.createNode("LoadImage");
+						newNode.pos = [...this.canvas.graph_mouse];
+						imageNode = this.graph.add(newNode);
+						this.graph.change();
+					}
+					const blob = item.getAsFile();
+					imageNode.pasteFile(blob);
+					return;
+				}
+			}
+
+			// No image found. Look for node data
+			data = data.getData("text/plain");
 			let workflow;
 			try {
 				data = data.slice(data.indexOf("{"));
@@ -687,9 +716,41 @@ export class ComfyApp {
 			if (workflow && workflow.version && workflow.nodes && workflow.extra) {
 				this.loadGraphData(workflow);
 			}
+			else {
+				if (e.target.type === "text" || e.target.type === "textarea") {
+					return;
+				}
+
+				// Litegraph default paste
+				this.canvas.pasteFromClipboard();
+			}
+
 		});
 	}
+
+	/**
+	 * Adds a handler on copy that serializes selected nodes to JSON
+	 */
+	#addCopyHandler() {
+		document.addEventListener("copy", (e) => {
+			if (e.target.type === "text" || e.target.type === "textarea") {
+				// Default system copy
+				return;
+			}
+			// copy nodes and clear clipboard
+			if (this.canvas.selected_nodes) {
+				this.canvas.copyToClipboard();
+				e.clipboardData.setData('text', ' '); //clearData doesn't remove images from clipboard
+				e.preventDefault();
+				e.stopImmediatePropagation();
+				return false;
+			}
+		});
+	}
+
+
 	/**
 	 * Handle mouse
 	 *
@@ -745,12 +806,6 @@ export class ComfyApp {
 		const self = this;
 		const origProcessKey = LGraphCanvas.prototype.processKey;
 		LGraphCanvas.prototype.processKey = function(e) {
-			const res = origProcessKey.apply(this, arguments);
-
-			if (res === false) {
-				return res;
-			}
-
 			if (!this.graph) {
 				return;
 			}
@@ -761,9 +816,10 @@ export class ComfyApp {
 				return;
 			}
 
-			if (e.type == "keydown") {
+			if (e.type == "keydown" && !e.repeat) {
+
 				// Ctrl + M mute/unmute
-				if (e.keyCode == 77 && e.ctrlKey) {
+				if (e.key === 'm' && e.ctrlKey) {
 					if (this.selected_nodes) {
 						for (var i in this.selected_nodes) {
 							if (this.selected_nodes[i].mode === 2) { // never
@@ -776,7 +832,8 @@ export class ComfyApp {
 					block_default = true;
 				}
 
-				if (e.keyCode == 66 && e.ctrlKey) {
+				// Ctrl + B bypass
+				if (e.key === 'b' && e.ctrlKey) {
 					if (this.selected_nodes) {
 						for (var i in this.selected_nodes) {
 							if (this.selected_nodes[i].mode === 4) { // never
@@ -788,6 +845,18 @@ export class ComfyApp {
 						}
 					}
 					block_default = true;
 				}
+
+				// Ctrl+C Copy
+				if ((e.key === 'c') && (e.metaKey || e.ctrlKey)) {
+					// Trigger onCopy
+					return true;
+				}
+
+				// Ctrl+V Paste
+				if ((e.key === 'v' || e.key == 'V') && (e.metaKey || e.ctrlKey)) {
+					// Trigger onPaste
+					return true;
+				}
 			}
 			this.graph.change();
@@ -798,7 +867,8 @@ export class ComfyApp {
 				return false;
 			}
 
-			return res;
+			// Fall through to Litegraph defaults
+			return origProcessKey.apply(this, arguments);
 		};
 	}
 
@@ -994,6 +1064,10 @@ export class ComfyApp {
 		api.addEventListener("execution_start", ({ detail }) => {
 			this.runningNodeId = null;
 			this.lastExecutionError = null
+			this.graph._nodes.forEach((node) => {
+				if (node.onExecutionStart)
+					node.onExecutionStart()
+			})
 		});
 
 		api.addEventListener("execution_error", ({ detail }) => {
@@ -1110,6 +1184,7 @@ export class ComfyApp {
 		this.#addDrawGroupsHandler();
 		this.#addApiUpdateHandlers();
 		this.#addDropHandler();
+		this.#addCopyHandler();
 		this.#addPasteHandler();
 		this.#addKeyboardHandler();
 
@@ -1151,6 +1226,7 @@ export class ComfyApp {
 				const inputData = inputs[inputName];
 				const type = inputData[0];
 
+				let widgetCreated = true;
 				if (Array.isArray(type)) {
 					// Enums
 					Object.assign(config, widgets.COMBO(this, inputName, inputData, app) || {});
@@ -1163,11 +1239,17 @@ export class ComfyApp {
 				} else {
 					// Node connection inputs
 					this.addInput(inputName, type);
+					widgetCreated = false;
 				}
-				if(inputData[1]?.forceInput && config?.widget) {
+
+				if(widgetCreated && inputData[1]?.forceInput && config?.widget) {
 					if (!config.widget.options) config.widget.options = {};
 					config.widget.options.forceInput = inputData[1].forceInput;
 				}
+				if(widgetCreated && inputData[1]?.defaultInput && config?.widget) {
+					if (!config.widget.options) config.widget.options = {};
+					config.widget.options.defaultInput = inputData[1].defaultInput;
+				}
 			}
 
 			for (const o in nodeData["output"]) {
diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js
index 5a4644b13..30caa6a8c 100644
--- a/web/scripts/widgets.js
+++ b/web/scripts/widgets.js
@@ -8,8 +8,13 @@ function getNumberDefaults(inputData, defaultStep) {
 	if (min == undefined) min = 0;
 	if (max == undefined) max = 2048;
 	if (step == undefined) step = defaultStep;
+	// precision is the number of decimal places to show.
+	// by default, display the the smallest number of decimal places such that changes of size step are visible.
+	let precision = Math.max(-Math.floor(Math.log10(step)),0)
+	// by default, round the value to those decimal places shown.
+	let round = Math.round(1000000*Math.pow(0.1,precision))/1000000;
 
-	return { val: defaultVal, config: { min, max, step: 10.0 * step } };
+	return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } };
 }
 
 export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) {
@@ -76,7 +81,7 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random
 				targetWidget.value = max;
 			}
 		}
-	return valueControl;
+	return valueControl;
 };
 
 function seedWidget(node, inputName, inputData, app) {
@@ -264,7 +269,10 @@ export const ComfyWidgets = {
 	FLOAT(node, inputName, inputData, app) {
 		let widgetType = isSlider(inputData[1]["display"], app);
 		const { val, config } = getNumberDefaults(inputData, 0.5);
-		return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) };
+		return { widget: node.addWidget(widgetType, inputName, val,
+			function (v) {
+				this.value = Math.round(v/config.round)*config.round;
+			}, config) };
 	},
 	INT(node, inputName, inputData, app) {
 		let widgetType = isSlider(inputData[1]["display"], app);
@@ -335,7 +343,7 @@ export const ComfyWidgets = {
 				subfolder = name.substring(0, folder_separator);
 				name = name.substring(folder_separator + 1);
 			}
-			img.src = api.apiURL(`/view?filename=${name}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`);
+			img.src = api.apiURL(`/view?filename=${encodeURIComponent(name)}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`);
 			node.setSizeForImage?.();
 		}
 
@@ -387,11 +395,12 @@ export const ComfyWidgets = {
 			}
 		});
 
-		async function uploadFile(file, updateNode) {
+		async function uploadFile(file, updateNode, pasted = false) {
 			try {
 				// Wrap file in formdata so it includes filename
 				const body = new FormData();
 				body.append("image", file);
+				if (pasted) body.append("subfolder", "pasted");
 				const resp = await api.fetchApi("/upload/image", {
 					method: "POST",
 					body,
@@ -399,15 +408,17 @@ export const ComfyWidgets = {
 
 				if (resp.status === 200) {
 					const data = await resp.json();
-					// Add the file as an option and update the widget value
-					if (!imageWidget.options.values.includes(data.name)) {
-						imageWidget.options.values.push(data.name);
+					// Add the file to the dropdown list and update the widget value
+					let path = data.name;
+					if (data.subfolder) path = data.subfolder + "/" + path;
+
+					if (!imageWidget.options.values.includes(path)) {
+						imageWidget.options.values.push(path);
 					}
 
 					if (updateNode) {
-						showImage(data.name);
-
-						imageWidget.value = data.name;
+						showImage(path);
+						imageWidget.value = path;
 					}
 				} else {
 					alert(resp.status + " - " + resp.statusText);
@@ -460,6 +471,16 @@ export const ComfyWidgets = {
 			return handled;
 		};
 
+		node.pasteFile = function(file) {
+			if (file.type.startsWith("image/")) {
+				const is_pasted = (file.name === "image.png") &&
+					(file.lastModified - Date.now() < 2000);
+				uploadFile(file, true, is_pasted);
+				return true;
+			}
+			return false;
+		}
+
 		return { widget: uploadWidget };
 	},
 };