From 6dbb18df928b97ef7858d8df1bf0bc0003d5f302 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Tue, 17 Oct 2023 17:53:57 -0300 Subject: [PATCH 001/170] Export and import templates --- web/extensions/core/nodeTemplates.js | 146 ++++++++++++++++++++++----- web/scripts/app.js | 44 +++++++- 2 files changed, 164 insertions(+), 26 deletions(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 7059f826d..118565169 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -22,6 +22,15 @@ class ManageTemplates extends ComfyDialog { super(); this.element.classList.add("comfy-manage-templates"); this.templates = this.load(); + + this.importInput = $el("input", { + type: "file", + accept: ".json", + multiple: true, + style: {display: "none"}, + parent: document.body, + onchange: () => this.importAll(), + }); } createButtons() { @@ -34,6 +43,22 @@ class ManageTemplates extends ComfyDialog { onclick: () => this.save(), }) ); + btns.unshift( + $el("button", { + type: "button", + textContent: "Export", + onclick: () => this.exportAll(), + }) + ); + btns.unshift( + $el("button", { + type: "button", + textContent: "Import", + onclick: () => { + this.importInput.click(); + }, + }) + ); return btns; } @@ -69,6 +94,50 @@ class ManageTemplates extends ComfyDialog { localStorage.setItem(id, JSON.stringify(this.templates)); } + async importAll() { + for (const file of this.importInput.files) { + if (file.type === "application/json" || file.name.endsWith(".json")) { + const reader = new FileReader(); + reader.onload = async () => { + var importFile = JSON.parse(reader.result); + if (importFile && importFile?.templates) { + for (const template of importFile.templates) { + if (template?.name && template?.data) { + this.templates.push(template); + } + } + this.store(); + } + }; + await reader.readAsText(file); + } + } + + this.close(); + } + + exportAll() { + if (this.templates.length == 0) { + alert("No templates to export."); + return; + } + + const json = JSON.stringify({templates: this.templates}, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: "node_templates.json", + style: {display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + } + show() { // Show list of template names + delete button super.show( @@ -97,19 +166,48 @@ class ManageTemplates extends ComfyDialog { }), ] ), - $el("button", { - textContent: "Delete", - style: { - fontSize: "12px", - color: "red", - fontWeight: "normal", - }, - onclick: (e) => { - nameInput.value = ""; - e.target.style.display = "none"; - e.target.previousElementSibling.style.display = "none"; - }, - }), + $el( + "div", + {}, + [ + $el("button", { + textContent: "Export", + style: { + fontSize: "12px", + fontWeight: "normal", + }, + onclick: (e) => { + const json = JSON.stringify({templates: [t]}, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: t.name + ".json", + style: {display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + }, + }), + $el("button", { + textContent: "Delete", + style: { + fontSize: "12px", + color: "red", 
+ fontWeight: "normal", + }, + onclick: (e) => { + nameInput.value = ""; + e.target.parentElement.style.display = "none"; + e.target.parentElement.previousElementSibling.style.display = "none"; + }, + }), + ] + ), ]; }) ) @@ -164,19 +262,17 @@ app.registerExtension({ }, })); - if (subItems.length) { - subItems.push(null, { - content: "Manage", - callback: () => manage.show(), - }); + subItems.push(null, { + content: "Manage", + callback: () => manage.show(), + }); - options.push({ - content: "Node Templates", - submenu: { - options: subItems, - }, - }); - } + options.push({ + content: "Node Templates", + submenu: { + options: subItems, + }, + }); return options; }; diff --git a/web/scripts/app.js b/web/scripts/app.js index 1a07d69bc..acbd30b2d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1416,6 +1416,43 @@ export class ComfyApp { } } + loadTemplateData(templateData) { + if (!templateData?.templates) { + return; + } + + const old = localStorage.getItem("litegrapheditor_clipboard"); + + var maxY, nodeBottom, node; + + for (const template of templateData.templates) { + if (!template?.data) { + continue; + } + + localStorage.setItem("litegrapheditor_clipboard", template.data); + app.canvas.pasteFromClipboard(); + + // Move mouse position down to paste the next template below + + maxY = false; + + for (const i in app.canvas.selected_nodes) { + node = app.canvas.selected_nodes[i]; + + nodeBottom = node.pos[1] + node.size[1]; + + if (maxY === false || nodeBottom > maxY) { + maxY = nodeBottom; + } + } + + app.canvas.graph_mouse[1] = maxY + 50; + } + + localStorage.setItem("litegrapheditor_clipboard", old); + } + /** * Populates the graph with the specified workflow data * @param {*} graphData A serialized graph object @@ -1756,7 +1793,12 @@ export class ComfyApp { } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); reader.onload = () => { - this.loadGraphData(JSON.parse(reader.result)); + var jsonContent = JSON.parse(reader.result); + if (jsonContent?.templates) { + this.loadTemplateData(jsonContent); + } else { + this.loadGraphData(jsonContent); + } }; reader.readAsText(file); } else if (file.name?.endsWith(".latent") || file.name?.endsWith(".safetensors")) { From a5550747370984714caa859c1c58cd77f43f9008 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Tue, 17 Oct 2023 19:44:26 -0300 Subject: [PATCH 002/170] Use name from input to export single node template --- web/extensions/core/nodeTemplates.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 118565169..92d57f9d4 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -182,7 +182,7 @@ class ManageTemplates extends ComfyDialog { const url = URL.createObjectURL(blob); const a = $el("a", { href: url, - download: t.name + ".json", + download: (nameInput.value || t.name) + ".json", style: {display: "none"}, parent: document.body, }); From c2bb34d865a4c512091e6036d421dd26d0889b25 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 02:04:41 -0400 Subject: [PATCH 003/170] Implement updated FreeU as _for_testing->FreeU_V2 node --- comfy_extras/nodes_freelunch.py | 46 +++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index 07a88bd96..7512b841d 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ 
-61,7 +61,53 @@ class FreeU: m.set_model_output_block_patch(output_block_patch) return (m, ) +class FreeU_V2: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "b1": ("FLOAT", {"default": 1.3, "min": 0.0, "max": 10.0, "step": 0.01}), + "b2": ("FLOAT", {"default": 1.4, "min": 0.0, "max": 10.0, "step": 0.01}), + "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}), + "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, b1, b2, s1, s2): + model_channels = model.model.model_config.unet_config["model_channels"] + scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)} + on_cpu_devices = {} + + def output_block_patch(h, hsp, transformer_options): + scale = scale_dict.get(h.shape[1], None) + if scale is not None: + hidden_mean = h.mean(1).unsqueeze(1) + B = hidden_mean.shape[0] + hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True) + hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True) + hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3) + + h[:,:h.shape[1] // 2] = h[:,:h.shape[1] // 2] * ((scale[0] - 1 ) * hidden_mean + 1) + + if hsp.device not in on_cpu_devices: + try: + hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) + except: + print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.") + on_cpu_devices[hsp.device] = True + hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) + else: + hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) + + return h, hsp + + m = model.clone() + m.set_model_output_block_patch(output_block_patch) + return (m, ) NODE_CLASS_MAPPINGS = { "FreeU": FreeU, + "FreeU_V2": FreeU_V2, } From 0d45a565daeb8e828680b00770c0e2e03f9955c2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 02:43:01 -0400 Subject: [PATCH 004/170] Fix memory issue related to control loras. The cleanup function was not getting called. 
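For context, a minimal sketch of the cleanup flow this patch adds, assuming
positive and negative are conditioning lists that may carry control(-lora)
objects; get_models_from_cond and cleanup_additional_models are the existing
helpers in comfy/sample.py:

    from comfy.sample import get_models_from_cond, cleanup_additional_models

    # gather every control model referenced by the conditioning and run its
    # cleanup() hook; these objects were missing from the list previously
    # passed to cleanup_additional_models, so their cleanup never ran
    control_models = set(get_models_from_cond(positive, "control")
                         + get_models_from_cond(negative, "control"))
    cleanup_additional_models(control_models)
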
--- comfy/sample.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/sample.py b/comfy/sample.py index 322272766..e6a69973d 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -98,6 +98,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative samples = samples.cpu() cleanup_additional_models(models) + cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))) return samples def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=None, callback=None, disable_pbar=False, seed=None): @@ -109,5 +110,6 @@ def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent samples = comfy.samplers.sample(real_model, noise, positive_copy, negative_copy, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) samples = samples.cpu() cleanup_additional_models(models) + cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))) return samples From 782a24fce65272649191635ce43e3bec5e09c5e2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 16:48:37 -0400 Subject: [PATCH 005/170] Refactor cond_concat into model object. --- comfy/model_base.py | 34 +++++++++++++++++++++++++++++++++- comfy/samplers.py | 28 ++++------------------------ 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index ed2dc83e4..8e704022e 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -26,6 +26,7 @@ class BaseModel(torch.nn.Module): self.adm_channels = unet_config.get("adm_in_channels", None) if self.adm_channels is None: self.adm_channels = 0 + self.inpaint_model = False print("model_type", model_type.name) print("adm", self.adm_channels) @@ -71,6 +72,37 @@ class BaseModel(torch.nn.Module): def encode_adm(self, **kwargs): return None + def cond_concat(self, **kwargs): + if self.inpaint_model: + concat_keys = ("mask", "masked_image") + cond_concat = [] + denoise_mask = kwargs.get("denoise_mask", None) + latent_image = kwargs.get("latent_image", None) + noise = kwargs.get("noise", None) + + def blank_inpaint_image_like(latent_image): + blank_image = torch.ones_like(latent_image) + # these are the values for "zero" in pixel space translated to latent space + blank_image[:,0] *= 0.8223 + blank_image[:,1] *= -0.6876 + blank_image[:,2] *= 0.6364 + blank_image[:,3] *= 0.1380 + return blank_image + + for ck in concat_keys: + if denoise_mask is not None: + if ck == "mask": + cond_concat.append(denoise_mask[:,:1]) + elif ck == "masked_image": + cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space + else: + if ck == "mask": + cond_concat.append(torch.ones_like(noise)[:,:1]) + elif ck == "masked_image": + cond_concat.append(blank_inpaint_image_like(noise)) + return cond_concat + return None + def load_model_weights(self, sd, unet_prefix=""): to_load = {} keys = list(sd.keys()) @@ -112,7 +144,7 @@ class BaseModel(torch.nn.Module): return {**unet_state_dict, **vae_state_dict, **clip_state_dict} def set_inpaint(self): - self.concat_keys = ("mask", "masked_image") + self.inpaint_model = True def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0): adm_inputs = [] diff --git a/comfy/samplers.py b/comfy/samplers.py index e43f7a6fe..bb8bfdfa4 100644 
--- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -358,15 +358,6 @@ def sgm_scheduler(model, steps): sigs += [0.0] return torch.FloatTensor(sigs) -def blank_inpaint_image_like(latent_image): - blank_image = torch.ones_like(latent_image) - # these are the values for "zero" in pixel space translated to latent space - blank_image[:,0] *= 0.8223 - blank_image[:,1] *= -0.6876 - blank_image[:,2] *= 0.6364 - blank_image[:,3] *= 0.1380 - return blank_image - def get_mask_aabb(masks): if masks.numel() == 0: return torch.zeros((0, 4), device=masks.device, dtype=torch.int) @@ -671,21 +662,10 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} - cond_concat = None - if hasattr(model, 'concat_keys'): #inpaint - cond_concat = [] - for ck in model.concat_keys: - if denoise_mask is not None: - if ck == "mask": - cond_concat.append(denoise_mask[:,:1]) - elif ck == "masked_image": - cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space - else: - if ck == "mask": - cond_concat.append(torch.ones_like(noise)[:,:1]) - elif ck == "masked_image": - cond_concat.append(blank_inpaint_image_like(noise)) - extra_args["cond_concat"] = cond_concat + if hasattr(model, 'cond_concat'): + cond_concat = model.cond_concat(noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + if cond_concat is not None: + extra_args["cond_concat"] = cond_concat samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) return model.process_latent_out(samples.to(torch.float32)) From 430a8334c500e00fb3b222082c6018cfcbc938aa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 19:48:36 -0400 Subject: [PATCH 006/170] Fix some potential issues. 
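Two guards, sketched below under the assumption that sd is an arbitrary
checkpoint state dict: load_clipvision_from_sd now returns None when no known
key layout matches instead of falling through to the ViT-L config, and
checkpoint loading skips CLIP construction when the model config offers no
clip_target. Caller-side, the first change looks like:

    from comfy.clip_vision import load_clipvision_from_sd

    clip_vision = load_clipvision_from_sd(sd)
    if clip_vision is None:
        # previously an unrecognized layout silently loaded the ViT-L config
        print("no known CLIP vision weights in this checkpoint")
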
--- comfy/clip_vision.py | 5 ++++- comfy/sd.py | 9 +++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 1206c680d..e085186ef 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -92,8 +92,11 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False): json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_g.json") elif "vision_model.encoder.layers.30.layer_norm1.weight" in sd: json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_h.json") - else: + elif "vision_model.encoder.layers.22.layer_norm1.weight" in sd: json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json") + else: + return None + clip = ClipVisionModel(json_config) m, u = clip.load_sd(sd) if len(m) > 0: diff --git a/comfy/sd.py b/comfy/sd.py index 48ee5721b..c364b723c 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -434,10 +434,11 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o if output_clip: w = WeightsLoader() clip_target = model_config.clip_target() - clip = CLIP(clip_target, embedding_directory=embedding_directory) - w.cond_stage_model = clip.cond_stage_model - sd = model_config.process_clip_state_dict(sd) - load_model_weights(w, sd) + if clip_target is not None: + clip = CLIP(clip_target, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model + sd = model_config.process_clip_state_dict(sd) + load_model_weights(w, sd) left_over = sd.keys() if len(left_over) > 0: From 45c972aba8cf95b229385bb58193d25fb77bccaa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 20:36:37 -0400 Subject: [PATCH 007/170] Refactor cond_concat into conditioning. 
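A sketch of the new data layout, with hypothetical tensor shapes for an SD1.x
inpaint model: each conditioning entry is a [cross_attn, options] pair, and
the inpaint concat tensors now travel inside options["concat"] (attached by
the new encode_cond helper, cropped per area in sampling_function) instead of
a sampler-wide cond_concat argument:

    import torch

    cross_attn = torch.zeros(1, 77, 768)      # hypothetical text embedding
    mask = torch.ones(1, 1, 64, 64)           # denoise mask at latent resolution
    masked_image = torch.zeros(1, 4, 64, 64)  # masked latent image
    cond_entry = [cross_attn, {"concat": [mask, masked_image]}]
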
--- comfy/samplers.py | 61 +++++++++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index bb8bfdfa4..a56599227 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -14,8 +14,8 @@ def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) #The main sampling function shared by all the samplers #Returns predicted noise -def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}, seed=None): - def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in): +def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): + def get_area_and_mult(cond, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) strength = 1.0 if 'timestep_start' in cond[1]: @@ -68,12 +68,15 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con conditionning = {} conditionning['c_crossattn'] = cond[0] - if cond_concat_in is not None and len(cond_concat_in) > 0: - cropped = [] - for x in cond_concat_in: - cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - cropped.append(cr) - conditionning['c_concat'] = torch.cat(cropped, dim=1) + + if 'concat' in cond[1]: + cond_concat_in = cond[1]['concat'] + if cond_concat_in is not None and len(cond_concat_in) > 0: + cropped = [] + for x in cond_concat_in: + cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] + cropped.append(cr) + conditionning['c_concat'] = torch.cat(cropped, dim=1) if adm_cond is not None: conditionning['c_adm'] = adm_cond @@ -173,7 +176,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con out['c_adm'] = torch.cat(c_adm) return out - def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, cond_concat_in, model_options): + def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) out_count = torch.ones_like(x_in)/100000.0 @@ -185,14 +188,14 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con to_run = [] for x in cond: - p = get_area_and_mult(x, x_in, cond_concat_in, timestep) + p = get_area_and_mult(x, x_in, timestep) if p is None: continue to_run += [(p, COND)] if uncond is not None: for x in uncond: - p = get_area_and_mult(x, x_in, cond_concat_in, timestep) + p = get_area_and_mult(x, x_in, timestep) if p is None: continue @@ -286,7 +289,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con if math.isclose(cond_scale, 1.0): uncond = None - cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options) + cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: args = {"cond": cond, "uncond": uncond, "cond_scale": cond_scale, "timestep": timestep} return model_options["sampler_cfg_function"](args) @@ -307,8 +310,8 @@ class CFGNoisePredictor(torch.nn.Module): super().__init__() self.inner_model = model self.alphas_cumprod = model.alphas_cumprod - def apply_model(self, x, timestep, cond, uncond, cond_scale, cond_concat=None, model_options={}, seed=None): - out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed) + def apply_model(self, x, 
timestep, cond, uncond, cond_scale, model_options={}, seed=None): + out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) return out @@ -316,11 +319,11 @@ class KSamplerX0Inpaint(torch.nn.Module): def __init__(self, model): super().__init__() self.inner_model = model - def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, cond_concat=None, model_options={}, seed=None): + def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, model_options={}, seed=None): if denoise_mask is not None: latent_mask = 1. - denoise_mask x = x * denoise_mask + (self.latent_image + self.noise * sigma.reshape([sigma.shape[0]] + [1] * (len(self.noise.shape) - 1))) * latent_mask - out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options, seed=seed) + out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, model_options=model_options, seed=seed) if denoise_mask is not None: out *= denoise_mask @@ -534,6 +537,19 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type): return conds +def encode_cond(model_function, key, conds, **kwargs): + for t in range(len(conds)): + x = conds[t] + params = x[1].copy() + for k in kwargs: + if k not in params: + params[k] = kwargs[k] + + out = model_function(**params) + if out is not None: + x[1] = x[1].copy() + x[1][key] = out + return conds class Sampler: def sample(self): @@ -653,20 +669,19 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) + if latent_image is not None: + latent_image = model.process_latent_in(latent_image) + if model.is_adm(): positive = encode_adm(model, positive, noise.shape[0], noise.shape[3], noise.shape[2], device, "positive") negative = encode_adm(model, negative, noise.shape[0], noise.shape[3], noise.shape[2], device, "negative") - if latent_image is not None: - latent_image = model.process_latent_in(latent_image) + if hasattr(model, 'cond_concat'): + positive = encode_cond(model.cond_concat, "concat", positive, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + negative = encode_cond(model.cond_concat, "concat", negative, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} - if hasattr(model, 'cond_concat'): - cond_concat = model.cond_concat(noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) - if cond_concat is not None: - extra_args["cond_concat"] = cond_concat - samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) return model.process_latent_out(samples.to(torch.float32)) From e6962120c6b6e36b3c87670a988ee825abba8dbe Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 19 Oct 2023 01:10:41 -0400 Subject: [PATCH 008/170] Make sure cond_concat is on the right device. 
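A minimal sketch of the failure mode, assuming sampling runs on CUDA: the
inpaint concat tensors are built from CPU-side latents, so they must be moved
to the device the UNet runs on before being concatenated into its input.

    import torch

    device = torch.device("cuda")            # sampling device, passed via kwargs
    denoise_mask = torch.ones(1, 1, 64, 64)  # built on the CPU
    part = denoise_mask[:, :1].to(device)    # the fix: match the UNet's device
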
--- comfy/model_base.py | 5 +++-- comfy/samplers.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 8e704022e..cda6765e4 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -79,6 +79,7 @@ class BaseModel(torch.nn.Module): denoise_mask = kwargs.get("denoise_mask", None) latent_image = kwargs.get("latent_image", None) noise = kwargs.get("noise", None) + device = kwargs["device"] def blank_inpaint_image_like(latent_image): blank_image = torch.ones_like(latent_image) @@ -92,9 +93,9 @@ class BaseModel(torch.nn.Module): for ck in concat_keys: if denoise_mask is not None: if ck == "mask": - cond_concat.append(denoise_mask[:,:1]) + cond_concat.append(denoise_mask[:,:1].to(device)) elif ck == "masked_image": - cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space + cond_concat.append(latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space else: if ck == "mask": cond_concat.append(torch.ones_like(noise)[:,:1]) diff --git a/comfy/samplers.py b/comfy/samplers.py index a56599227..4840b6d9f 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -537,10 +537,11 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type): return conds -def encode_cond(model_function, key, conds, **kwargs): +def encode_cond(model_function, key, conds, device, **kwargs): for t in range(len(conds)): x = conds[t] params = x[1].copy() + params["device"] = device for k in kwargs: if k not in params: params[k] = kwargs[k] @@ -677,8 +678,8 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model negative = encode_adm(model, negative, noise.shape[0], noise.shape[3], noise.shape[2], device, "negative") if hasattr(model, 'cond_concat'): - positive = encode_cond(model.cond_concat, "concat", positive, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) - negative = encode_cond(model.cond_concat, "concat", negative, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + positive = encode_cond(model.cond_concat, "concat", positive, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + negative = encode_cond(model.cond_concat, "concat", negative, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} From f1062be622eab6f989d2020f2c96cfff4f53c724 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" Date: Fri, 20 Oct 2023 00:07:08 +0900 Subject: [PATCH 009/170] fix: Fixing intermittent crashes with undefined graphs in the Firefox browser. --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 1a07d69bc..aadc7d3de 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -492,7 +492,7 @@ export class ComfyApp { } if (this.imgs && this.imgs.length) { - const canvas = graph.list_of_graphcanvas[0]; + const canvas = app.graph.list_of_graphcanvas[0]; const mouse = canvas.graph_mouse; if (!canvas.pointer_is_down && this.pointerDown) { if (mouse[0] === this.pointerDown.pos[0] && mouse[1] === this.pointerDown.pos[1]) { From 4185324a1d0da3dd9d80e091361d9c218daab007 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 04:03:07 -0400 Subject: [PATCH 010/170] Fix uni_pc sampler math. This changes the images this sampler produces. 
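What the new SigmaConvert schedule below computes: a k-diffusion sigma is
treated as a VP time with alpha = 1 / sqrt(sigma^2 + 1), so the half-log-SNR
lambda collapses to -log(sigma). A small numeric check of that identity:

    import torch

    sigma = torch.tensor(2.0)
    log_alpha = 0.5 * torch.log(1 / (sigma * sigma + 1))  # marginal_log_mean_coeff
    std = torch.sqrt(1. - torch.exp(2. * log_alpha))      # marginal_std
    lambda_t = log_alpha - torch.log(std)                 # marginal_lambda
    assert torch.allclose(lambda_t, -torch.log(sigma))
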
--- comfy/extra_samplers/uni_pc.py | 51 +++++++++++++++++++--------------- comfy/k_diffusion/external.py | 4 +++ comfy/samplers.py | 2 +- 3 files changed, 33 insertions(+), 24 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 7e88bb9fa..58e030d04 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -713,8 +713,8 @@ class UniPC: method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', atol=0.0078, rtol=0.05, corrector=False, callback=None, disable_pbar=False ): - t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end - t_T = self.noise_schedule.T if t_start is None else t_start + # t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + # t_T = self.noise_schedule.T if t_start is None else t_start device = x.device steps = len(timesteps) - 1 if method == 'multistep': @@ -769,8 +769,8 @@ class UniPC: callback(step_index, model_prev_list[-1], x, steps) else: raise NotImplementedError() - if denoise_to_zero: - x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + # if denoise_to_zero: + # x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) return x @@ -833,21 +833,33 @@ def expand_dims(v, dims): return v[(...,) + (None,)*(dims - 1)] +class SigmaConvert: + schedule = "" + def marginal_log_mean_coeff(self, sigma): + return 0.5 * torch.log(1 / ((sigma * sigma) + 1)) + + def marginal_alpha(self, t): + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. 
* log_mean_coeff)) + return log_mean_coeff - log_std def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): - to_zero = False + timesteps = sigmas.clone() if sigmas[-1] == 0: - timesteps = torch.nn.functional.interpolate(sigmas[None,None,:-1], size=(len(sigmas),), mode='linear')[0][0] - to_zero = True + timesteps = sigmas[:] + timesteps[-1] = 0.001 else: timesteps = sigmas.clone() - - alphas_cumprod = model.inner_model.alphas_cumprod - - for s in range(timesteps.shape[0]): - timesteps[s] = (model.sigma_to_discrete_timestep(timesteps[s]) / 1000) + (1 / len(alphas_cumprod)) - - ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + ns = SigmaConvert() if image is not None: img = image * ns.marginal_alpha(timesteps[0]) @@ -859,16 +871,10 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex else: img = noise - if to_zero: - timesteps[-1] = (1 / len(alphas_cumprod)) - - device = noise.device - - model_type = "noise" model_fn = model_wrapper( - model.predict_eps_discrete_timestep, + model.predict_eps_sigma, ns, model_type=model_type, guidance_type="uncond", @@ -878,6 +884,5 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex order = min(3, len(timesteps) - 1) uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant) x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback, disable_pbar=disable) - if not to_zero: - x /= ns.marginal_alpha(timesteps[-1]) + x /= ns.marginal_alpha(timesteps[-1]) return x diff --git a/comfy/k_diffusion/external.py b/comfy/k_diffusion/external.py index c1a137d9c..953d3db2c 100644 --- a/comfy/k_diffusion/external.py +++ b/comfy/k_diffusion/external.py @@ -97,6 +97,10 @@ class DiscreteSchedule(nn.Module): input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) + def predict_eps_sigma(self, input, sigma, **kwargs): + input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) + return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) + class DiscreteEpsDDPMDenoiser(DiscreteSchedule): """A wrapper for discrete schedule DDPM models that output eps (the predicted noise).""" diff --git a/comfy/samplers.py b/comfy/samplers.py index 4840b6d9f..0b38fbd1e 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -739,7 +739,7 @@ class KSampler: sigmas = None discard_penultimate_sigma = False - if self.sampler in ['dpm_2', 'dpm_2_ancestral']: + if self.sampler in ['dpm_2', 'dpm_2_ancestral', 'uni_pc', 'uni_pc_bh2']: steps += 1 discard_penultimate_sigma = True From 484bfe46c21cf108a687b579354b5996f867a4f7 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Fri, 20 Oct 2023 15:19:29 -0300 Subject: [PATCH 011/170] Clear importInput after import so change event works with same file --- web/extensions/core/nodeTemplates.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 92d57f9d4..434491075 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -113,6 +113,8 @@ class ManageTemplates extends ComfyDialog { } } + this.importInput.value = null; + this.close(); } From 
5818ca83a243430e6141ce0e7c1096b4ac83d392 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 21 Oct 2023 03:49:04 +0100 Subject: [PATCH 012/170] Unit tests + widget input fixes (#1760) * setup ui unit tests * Refactoring, adding connections * Few tweaks * Fix type * Add general test * Refactored and extended test * move to describe * for groups * Add test for converted widgets on missing nodes + fix crash * tidy * mores tests + refactor * throw earlier to get less confusing error * support outputs * more test * add ci action * use lts node * Fix? * Prevent connecting non matching combos * update * accidently removed npm i * Disable logging extension * added step to generate object_info * fix python * install python * install deps * fix cwd? * logging * Fix double resolve * create dir * update pkg --- .github/workflows/test-ui.yaml | 25 + .gitignore | 1 + tests-ui/.gitignore | 1 + tests-ui/babel.config.json | 3 + tests-ui/globalSetup.js | 14 + tests-ui/jest.config.js | 9 + tests-ui/package-lock.json | 5566 +++++++++++++++++++++++++++ tests-ui/package.json | 30 + tests-ui/setup.js | 87 + tests-ui/tests/widgetInputs.test.js | 319 ++ tests-ui/utils/ezgraph.js | 417 ++ tests-ui/utils/index.js | 71 + tests-ui/utils/litegraph.js | 36 + tests-ui/utils/nopProxy.js | 6 + tests-ui/utils/setup.js | 45 + web/extensions/core/widgetInputs.js | 68 +- 16 files changed, 6680 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/test-ui.yaml create mode 100644 tests-ui/.gitignore create mode 100644 tests-ui/babel.config.json create mode 100644 tests-ui/globalSetup.js create mode 100644 tests-ui/jest.config.js create mode 100644 tests-ui/package-lock.json create mode 100644 tests-ui/package.json create mode 100644 tests-ui/setup.js create mode 100644 tests-ui/tests/widgetInputs.test.js create mode 100644 tests-ui/utils/ezgraph.js create mode 100644 tests-ui/utils/index.js create mode 100644 tests-ui/utils/litegraph.js create mode 100644 tests-ui/utils/nopProxy.js create mode 100644 tests-ui/utils/setup.js diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml new file mode 100644 index 000000000..62b4c35f6 --- /dev/null +++ b/.github/workflows/test-ui.yaml @@ -0,0 +1,25 @@ +name: Tests CI + +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v3 + with: + node-version: 18 + - uses: actions/setup-python@v4 + with: + python-version: '3.10' + - name: Install requirements + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + - name: Run Tests + run: | + npm install + npm run test:generate + npm test + working-directory: ./tests-ui diff --git a/.gitignore b/.gitignore index 98d91318d..43c038e41 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ venv/ /web/extensions/* !/web/extensions/logging.js.example !/web/extensions/core/ +/tests-ui/data/object_info.json \ No newline at end of file diff --git a/tests-ui/.gitignore b/tests-ui/.gitignore new file mode 100644 index 000000000..b512c09d4 --- /dev/null +++ b/tests-ui/.gitignore @@ -0,0 +1 @@ +node_modules \ No newline at end of file diff --git a/tests-ui/babel.config.json b/tests-ui/babel.config.json new file mode 100644 index 000000000..526ddfd8d --- /dev/null +++ b/tests-ui/babel.config.json @@ -0,0 +1,3 @@ +{ + "presets": ["@babel/preset-env"] +} diff --git a/tests-ui/globalSetup.js b/tests-ui/globalSetup.js new file mode 100644 index 000000000..b9d97f58a 
--- /dev/null +++ b/tests-ui/globalSetup.js @@ -0,0 +1,14 @@ +module.exports = async function () { + global.ResizeObserver = class ResizeObserver { + observe() {} + unobserve() {} + disconnect() {} + }; + + const { nop } = require("./utils/nopProxy"); + global.enableWebGLCanvas = nop; + + HTMLCanvasElement.prototype.getContext = nop; + + localStorage["Comfy.Settings.Comfy.Logging.Enabled"] = "false"; +}; diff --git a/tests-ui/jest.config.js b/tests-ui/jest.config.js new file mode 100644 index 000000000..b5a5d646d --- /dev/null +++ b/tests-ui/jest.config.js @@ -0,0 +1,9 @@ +/** @type {import('jest').Config} */ +const config = { + testEnvironment: "jsdom", + setupFiles: ["./globalSetup.js"], + clearMocks: true, + resetModules: true, +}; + +module.exports = config; diff --git a/tests-ui/package-lock.json b/tests-ui/package-lock.json new file mode 100644 index 000000000..35911cd7f --- /dev/null +++ b/tests-ui/package-lock.json @@ -0,0 +1,5566 @@ +{ + "name": "comfui-tests", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "comfui-tests", + "version": "1.0.0", + "license": "GPL-3.0", + "devDependencies": { + "@babel/preset-env": "^7.22.20", + "@types/jest": "^29.5.5", + "jest": "^29.7.0", + "jest-environment-jsdom": "^29.7.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": 
"sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.20.tgz", + "integrity": "sha512-BQYjKbpXjoXwFW5jGqiizJQQT/aC7pFm9Ok1OWssonuguICi264lbgMzRp2ZMmRSlfkX6DsWDDcsrctK8Rwfiw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.0.tgz", + "integrity": "sha512-97z/ju/Jy1rZmDxybphrBuI+jtJjFVoz7Mr9yUQVVVi+DNZE333uFQeMOqcCIy1x3WYBIbWftUSLmbNXNT7qFQ==", + "dev": true, + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helpers": "^7.23.0", + "@babel/parser": "^7.23.0", + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.0", + "@babel/types": "^7.23.0", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", + "dev": true, + "dependencies": { + "@babel/types": "^7.23.0", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.22.15", + "resolved": 
"https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz", + "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz", + "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.9", + "@babel/helper-validator-option": "^7.22.15", + "browserslist": "^4.21.9", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.15.tgz", + "integrity": "sha512-jKkwA59IXcvSaiK2UN45kKwSC9o+KuoXsBDvHvU/7BecYIp8GQ2UwrVvFgJASUT+hBnwJx6MhvMCuMzwZZ7jlg==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-member-expression-to-functions": "^7.22.15", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz", + "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.2.tgz", + "integrity": "sha512-k0qnnOqHn5dK9pZpfD5XXZ9SojAITdCKRn2Lp6rnDGzIbaP0rHyMPk/4wsSxVBVz4RfN0q6VpXWP2pDGIoQ7hw==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "resolved": 
"https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "dev": true, + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz", + "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==", + "dev": true, + "dependencies": { + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz", + "integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz", + "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", 
+ "@babel/helper-wrap-function": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz", + "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-member-expression-to-functions": "^7.22.15", + "@babel/helper-optimise-call-expression": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz", + "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz", + "integrity": "sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw==", + "dev": true, + "dependencies": { + "@babel/helper-function-name": "^7.22.5", + "@babel/template": "^7.22.15", + 
"@babel/types": "^7.22.19" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.23.1", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.1.tgz", + "integrity": "sha512-chNpneuK18yW5Oxsr+t553UZzzAs3aZnFm4bxhebsNTeshrC95yA7l5yl7GBAG+JG1rF0F7zzD2EixK9mWSDoA==", + "dev": true, + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.0", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.23.0", + 
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", + "dev": true, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.15.tgz", + "integrity": "sha512-FB9iYlz7rURmRJyXRKEnalYPPdn87H5no108cyuQQyMwlpJ2SJtpIUBI27kdTin956pz+LPypkPVPUTlxOmrsg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.15.tgz", + "integrity": "sha512-Hyph9LseGvAeeXzikV88bczhsrLrIZqDPxO+sSmAunMPaGrBGhfMWzCPYTtiW9t+HzSE2wtV8e5cc5P6r1xMDQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "dev": true, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz", + "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz", + "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", + "integrity": 
"sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", + "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz", + "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.15.tgz", + "integrity": "sha512-jBm1Es25Y+tVoTi5rfd5t1KLmL8ogLKpXszboWOTTtGFGz2RKnQe2yn7HbZ+kb/B8N0FVSGQo874NSlOU1T4+w==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.9", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz", + "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz", + "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.0.tgz", + "integrity": "sha512-cOsrbmIOXmf+5YbL99/S49Y3j46k/T16b9ml8bm9lP6N9US5iQ2yBK7gpui1pg0V/WMcXdkfKbTb7HXq9u+v4g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz", + "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.11.tgz", + "integrity": "sha512-GMM8gGmqI7guS/llMFk1bJDkKfn3v3C4KHK9Yg1ey5qcHcOlKb0QvcMrgzvxo+T03/4szNh5lghY+fEC98Kq9g==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.11", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.15.tgz", + "integrity": "sha512-VbbC3PGjBdE0wAWDdHM9G8Gm977pnYI0XpqMd6LrKISj8/DJXEsWqgRuTYaNE9Bv0JGhTZUzHDlMk18IpOuoqw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.9", + "@babel/helper-split-export-declaration": "^7.22.6", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz", + "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/template": "^7.22.5" + }, + 
"engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.0.tgz", + "integrity": "sha512-vaMdgNXFkYrB+8lbgniSYWHsgqK5gjaMNcc84bMIOMRLH0L9AqYq3hwMdvnyqj1OPqea8UtjPEuS/DCenah1wg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz", + "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz", + "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.11.tgz", + "integrity": "sha512-g/21plo58sfteWjaO0ZNVb+uEOkJNjAaHhbejrnBmu011l/eNDScmkbjCC3l4FKb10ViaGU4aOkFznSu2zRHgA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz", + "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==", + "dev": true, + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.11.tgz", + "integrity": "sha512-xa7aad7q7OiT8oNZ1mU7NrISjlSkVdMbNxn9IuLZyL9AJEhs1Apba3I+u5riX1dIkdptP5EKDG5XDPByWxtehw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.22.15", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.15.tgz", + "integrity": "sha512-me6VGeHsx30+xh9fbDLLPi0J1HzmeIIyenoOQHuw2D4m2SAU3NrspX5XxJLBpqn5yrLzrlw2Iy3RA//Bx27iOA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz", + "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.11.tgz", + "integrity": "sha512-CxT5tCqpA9/jXFlme9xIBCc5RPtdDq3JpkkhgHQqtDdiTnTI0jtZ0QzXhr5DILeYifDPp2wvY2ad+7+hLMW5Pw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz", + "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.11.tgz", + "integrity": "sha512-qQwRTP4+6xFCDV5k7gZBF3C31K34ut0tbEcTKxlX/0KXxm9GLcO14p570aWxFvVzx6QAfPgq7gaeIHXJC8LswQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz", + "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.0.tgz", + "integrity": "sha512-xWT5gefv2HGSm4QHtgc1sYPbseOyf+FFDo2JbpE25GWl5BqTGO9IMwTYJRoIdjsF85GE+VegHxSCUt5EvoYTAw==", + "dev": true, + "dependencies": { + 
"@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.0.tgz", + "integrity": "sha512-32Xzss14/UVc7k9g775yMIvkVK8xwKE0DPdP5JTapr3+Z9w4tzeOuLNY6BXDQR6BdnzIlXnCGAzsk/ICHBLVWQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.0.tgz", + "integrity": "sha512-qBej6ctXZD2f+DhlOC9yO47yEYgUh5CZNz/aBoH4j/3NOlRfJXJbY7xDQCqQVf9KbrqGzIWER1f23doHGrIHFg==", + "dev": true, + "dependencies": { + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz", + "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz", + "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.11.tgz", + "integrity": "sha512-YZWOw4HxXrotb5xsjMJUDlLgcDXSfO9eCmdl1bgW4+/lAGdkjaEvOnQ4p5WKKdUgSzO39dgPl0pTnfxm0OAXcg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + 
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.11.tgz", + "integrity": "sha512-3dzU4QGPsILdJbASKhF/V2TVP+gJya1PsueQCxIPCEcerqF21oEcrob4mzjsp2Py/1nLfF5m+xYNMDpmA8vffg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.15.tgz", + "integrity": "sha512-fEB+I1+gAmfAyxZcX1+ZUwLeAuuf8VIg67CTznZE0MqVFumWkh8xWtn58I4dxdVf080wn7gzWoF8vndOViJe9Q==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.9", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz", + "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.11.tgz", + "integrity": "sha512-rli0WxesXUeCJnMYhzAglEjLWVDF6ahb45HuprcmQuLidBJFWjNnOzssk2kuc6e33FlLaiZhG/kUIzUMWdBKaQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.0.tgz", + "integrity": "sha512-sBBGXbLJjxTzLBF5rFWaikMnOGOk/BmK6vVByIdEggZ7Vn6CvWXZyRkkLFK6WE0IF8jSliyOkUN6SScFgzCM0g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.15.tgz", + "integrity": "sha512-hjk7qKIqhyzhhUvRT683TYQOFa/4cQKwQy7ALvTpODswN40MljzNDa0YldevS6tGbxwaEKVn502JmY0dP7qEtQ==", + "dev": 
true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz", + "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.11.tgz", + "integrity": "sha512-sSCbqZDBKHetvjSwpyWzhuHkmW5RummxJBVbYLkGkaiTOWGxml7SXt0iWa03bzxFIx7wOj3g/ILRd0RcJKBeSQ==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.11", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz", + "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.22.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.10.tgz", + "integrity": "sha512-F28b1mDt8KcT5bUyJc/U9nwzw6cV+UmTeRlXYIl2TNqMMJif0Jeey9/RQ3C4NOd2zp0/TRsDns9ttj2L523rsw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz", + "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz", + "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-spread": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz", + "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz", + "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz", + "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz", + "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.22.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.10.tgz", + "integrity": "sha512-lRfaRKGZCBqDlRU3UIFovdp9c9mEvlylmpod0/OatICsSfuQ9YFthRo1tpTkGsklEefZdqlEFdY4A2dwTb6ohg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz", + "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz", + "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==", + "dev": true, + "dependencies": { + 
"@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz", + "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.20.tgz", + "integrity": "sha512-11MY04gGC4kSzlPHRfvVkNAZhUxOvm7DCJ37hPDnUENwe06npjIRAfInEMTGSb4LZK5ZgDFkv5hw0lGebHeTyg==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.20", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.15", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.15", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.15", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.22.5", + "@babel/plugin-syntax-import-attributes": "^7.22.5", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.22.5", + "@babel/plugin-transform-async-generator-functions": "^7.22.15", + "@babel/plugin-transform-async-to-generator": "^7.22.5", + "@babel/plugin-transform-block-scoped-functions": "^7.22.5", + "@babel/plugin-transform-block-scoping": "^7.22.15", + "@babel/plugin-transform-class-properties": "^7.22.5", + "@babel/plugin-transform-class-static-block": "^7.22.11", + "@babel/plugin-transform-classes": "^7.22.15", + "@babel/plugin-transform-computed-properties": "^7.22.5", + "@babel/plugin-transform-destructuring": "^7.22.15", + "@babel/plugin-transform-dotall-regex": "^7.22.5", + "@babel/plugin-transform-duplicate-keys": "^7.22.5", + "@babel/plugin-transform-dynamic-import": "^7.22.11", + "@babel/plugin-transform-exponentiation-operator": "^7.22.5", + "@babel/plugin-transform-export-namespace-from": "^7.22.11", + "@babel/plugin-transform-for-of": "^7.22.15", + "@babel/plugin-transform-function-name": "^7.22.5", + "@babel/plugin-transform-json-strings": 
"^7.22.11", + "@babel/plugin-transform-literals": "^7.22.5", + "@babel/plugin-transform-logical-assignment-operators": "^7.22.11", + "@babel/plugin-transform-member-expression-literals": "^7.22.5", + "@babel/plugin-transform-modules-amd": "^7.22.5", + "@babel/plugin-transform-modules-commonjs": "^7.22.15", + "@babel/plugin-transform-modules-systemjs": "^7.22.11", + "@babel/plugin-transform-modules-umd": "^7.22.5", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.22.5", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.11", + "@babel/plugin-transform-numeric-separator": "^7.22.11", + "@babel/plugin-transform-object-rest-spread": "^7.22.15", + "@babel/plugin-transform-object-super": "^7.22.5", + "@babel/plugin-transform-optional-catch-binding": "^7.22.11", + "@babel/plugin-transform-optional-chaining": "^7.22.15", + "@babel/plugin-transform-parameters": "^7.22.15", + "@babel/plugin-transform-private-methods": "^7.22.5", + "@babel/plugin-transform-private-property-in-object": "^7.22.11", + "@babel/plugin-transform-property-literals": "^7.22.5", + "@babel/plugin-transform-regenerator": "^7.22.10", + "@babel/plugin-transform-reserved-words": "^7.22.5", + "@babel/plugin-transform-shorthand-properties": "^7.22.5", + "@babel/plugin-transform-spread": "^7.22.5", + "@babel/plugin-transform-sticky-regex": "^7.22.5", + "@babel/plugin-transform-template-literals": "^7.22.5", + "@babel/plugin-transform-typeof-symbol": "^7.22.5", + "@babel/plugin-transform-unicode-escapes": "^7.22.10", + "@babel/plugin-transform-unicode-property-regex": "^7.22.5", + "@babel/plugin-transform-unicode-regex": "^7.22.5", + "@babel/plugin-transform-unicode-sets-regex": "^7.22.5", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "@babel/types": "^7.22.19", + "babel-plugin-polyfill-corejs2": "^0.4.5", + "babel-plugin-polyfill-corejs3": "^0.8.3", + "babel-plugin-polyfill-regenerator": "^0.5.2", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==", + "dev": true + }, + "node_modules/@babel/runtime": { + "version": "7.23.1", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.1.tgz", + "integrity": "sha512-hC2v6p8ZSI/W0HUzh3V8C5g+NwSKzKPtJwSpTjwl0o297GP9+ZLQSkdvHz46CM3LqyoXxq+5G9komY+eSqSO0g==", + "dev": true, + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": 
"sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", + "debug": "^4.1.0", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": 
"^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + 
"@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": 
"https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.19", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", + "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz", + "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "engines": { + "node": ">= 
10" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.2", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.2.tgz", + "integrity": "sha512-pNpr1T1xLUc2l3xJKuPtsEky3ybxN3m4fJkknfIpTCTfIZCDW57oAg+EfCgIIp2rvCe0Wn++/FfodDS4YXxBwA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.6.5", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.5.tgz", + "integrity": "sha512-h9yIuWbJKdOPLJTbmSpPzkF67e659PbQDba7ifWm5BJ8xTv+sDmS7rFmywkWOvXedGTivCdeGSIIX8WLcRTz8w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.2.tgz", + "integrity": "sha512-/AVzPICMhMOMYoSx9MoKpGDKdBRsIXMNByh1PXSZoa+v6ZoLa8xxtsT/uLQ/NJm0XVAWl/BvId4MlDeXJaeIZQ==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.2", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.2.tgz", + "integrity": "sha512-ojlGK1Hsfce93J0+kn3H5R73elidKUaZonirN33GSmgTUMpzI/MIFfSpF3haANe3G1bEBS9/9/QEqwTzwqFsKw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.7.tgz", + "integrity": "sha512-MhzcwU8aUygZroVwL2jeYk6JisJrPl/oov/gsgGCue9mkgl9wjGbzReYQClxiUgFDnib9FuHqTndccKeZKxTRw==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", + "dev": true + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-gPQuzaPR5h/djlAv2apEG1HVOyj1IUs7GpfMZixU0/0KXT3pm64ylHuMUI1/Akh+sq/iikxg6Z2j+fcMDXaaTQ==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.2.tgz", + "integrity": "sha512-kv43F9eb3Lhj+lr/Hn6OcLCs/sSM8bt+fIaP11rCYngfV6NVjzWXJ17owQtDQTL9tQ8WSLUrGsSJ6rJz0F1w1A==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.5", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.5.tgz", + "integrity": "sha512-ebylz2hnsWR9mYvmBFbXJXr+33UPc4+ZdxyDXh5w0FlPBTfCVN3wPL+kuOiQt3xvrK419v7XWeAs+AeOksafXg==", + "dev": true, + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/jsdom": { + "version": "20.0.1", + "resolved": "https://registry.npmjs.org/@types/jsdom/-/jsdom-20.0.1.tgz", + "integrity": "sha512-d0r18sZPmMQr1eG35u12FZfhIXNrnsPU/g5wvRKCUf/tOGilKKwYMYGqh33BNR6ba+2gkHw1EUiHoN3mn7E5IQ==", + "dev": true, + "dependencies": { + "@types/node": "*", 
+ "@types/tough-cookie": "*", + "parse5": "^7.0.0" + } + }, + "node_modules/@types/node": { + "version": "20.8.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.3.tgz", + "integrity": "sha512-jxiZQFpb+NlH5kjW49vXxvxTjeeqlbsnTAdBTKpzEdPs9itay7MscYXz3Fo9VYFEsfQ6LJFitHad3faerLAjCw==", + "dev": true + }, + "node_modules/@types/stack-utils": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz", + "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", + "dev": true + }, + "node_modules/@types/tough-cookie": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.3.tgz", + "integrity": "sha512-THo502dA5PzG/sfQH+42Lw3fvmYkceefOspdCwpHRul8ik2Jv1K8I5OZz1AT3/rs46kwgMCe9bSBmDLYkkOMGg==", + "dev": true + }, + "node_modules/@types/yargs": { + "version": "17.0.28", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.28.tgz", + "integrity": "sha512-N3e3fkS86hNhtk6BEnc0rj3zcehaxx8QWhCROJkqpl5Zaoi7nAic3jH8q94jVD3zu5LGk+PUB6KAiDmimYOEQw==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.1", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.1.tgz", + "integrity": "sha512-axdPBuLuEJt0c4yI5OZssC19K2Mq1uKdrfZBzuxLvaztgqUtFYZUNw7lETExPYJR9jdEoIg4mb7RQKRQzOkeGQ==", + "dev": true + }, + "node_modules/abab": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", + "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", + "dev": true + }, + "node_modules/acorn": { + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-globals": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-7.0.1.tgz", + "integrity": "sha512-umOSDSDrfHbTNPuNpC2NSnnA3LUrqpevPb4T9jRx4MagXNS0rs+gwiTcAvqCRmsD6utzsrzNt+ebm00SNWiC3Q==", + "dev": true, + "dependencies": { + "acorn": "^8.1.0", + "acorn-walk": "^8.0.2" + } + }, + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + 
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": 
"sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.5.tgz", + "integrity": "sha512-19hwUH5FKl49JEsvyTcoHakh6BE0wgXLLptIyKZ3PijHc/Ci521wygORCUCCred+E/twuqRyAkE02BAWPmsHOg==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.4.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.8.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.4.tgz", + "integrity": "sha512-9l//BZZsPR+5XjyJMPtZSK4jv0BsTO1zDac2GC6ygx9WLGlcsnRd1Co0B2zT5fF5Ic6BZy+9m3HNZ3QcOeDKfg==", + "dev": true, + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.2", + "core-js-compat": "^3.32.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.2.tgz", + "integrity": "sha512-tAlOptU0Xj34V1Y2PNTL4Y0FOJMDB6bZmoW39FeCQIhigGLkqu3Fj6uiXpxIf6Ij274ENdYx64y6Au+ZKlb1IA==", + "dev": true, + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", + "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", + "dev": true, + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz", + "integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001541", + "electron-to-chromium": "^1.4.535", + "node-releases": "^2.0.13", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001546", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001546.tgz", + "integrity": "sha512-zvtSJwuQFpewSyRrI3AsftF6rM0X80mZkChIt1spBGEvRglCrjTniXvinc8JKRoqTwXAgvqTImaN9igfSMtUBw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz", + "integrity": "sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==", + "dev": true + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": 
"sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/core-js-compat": { + "version": "3.33.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.33.0.tgz", + "integrity": "sha512-0w4LcLXsVEuNkIqwjjf9rjCoPhK8uqA4tMRh4Ge26vfLtUutshn+aRJU21I9LCJlh2QQHfisNToLjw1XEJLTWw==", + "dev": true, + "dependencies": { + "browserslist": "^4.22.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssom": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.5.0.tgz", + "integrity": "sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==", + "dev": true + }, + "node_modules/cssstyle": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz", + "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==", + "dev": true, + "dependencies": { + "cssom": "~0.3.6" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cssstyle/node_modules/cssom": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz", + "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", + "dev": true + }, + "node_modules/data-urls": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz", + "integrity": "sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==", + "dev": true, + "dependencies": { + "abab": "^2.0.6", + "whatwg-mimetype": "^3.0.0", + "whatwg-url": "^11.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js": { + "version": "10.4.3", + "resolved": 
"https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz", + "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==", + "dev": true + }, + "node_modules/dedent": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz", + "integrity": "sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==", + "dev": true, + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/domexception": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", + "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", + "dev": true, + "dependencies": { + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.544", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.544.tgz", + "integrity": "sha512-54z7squS1FyFRSUqq/knOFSptjjogLZXbKcYk3B0qkE1KZzvqASwRZnY2KzZQJqIYLVD38XZeoiMRflYSwyO4w==", + "dev": true + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "dev": true, + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + 
"node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/escodegen": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", + "dev": true, + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^5.2.0", + "esutils": "^2.0.2" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=6.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": 
"sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dev": true, + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": 
"https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true + }, + "node_modules/has": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.4.tgz", + "integrity": "sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==", + "dev": true, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", + "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "dev": true, + "dependencies": { + "whatwg-encoding": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": 
"sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/import-local": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "dev": true, + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-core-module": { + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", + "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", + "dev": true, + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + 
"dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", + "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.1.tgz", + "integrity": "sha512-EAMEJBsYuyyztxMxW3g7ugGPkrZsV57v0Hmv3mm1uQsmB+QnZuepg731CRaIgeUVSdmsTngOkSnauNF8p7FIhA==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "dependencies": { + 
"istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz", + "integrity": "sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, 
+ "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-jsdom": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-29.7.0.tgz", + "integrity": "sha512-k9iQbsf9OyOfdzWH8HDmrRT0gSIcX+FLNW7IQq94tFX0gynPwqDTW0Ho6iMVNjGz/nb+l/vW3dWM2bbLLpkbXA==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/jsdom": "^20.0.0", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0", + "jsdom": "^20.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "canvas": "^2.5.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jest-environment-node": { + 
"version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + 
"jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-snapshot/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": 
"sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsdom": { + "version": "20.0.3", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-20.0.3.tgz", + "integrity": "sha512-SYhBvTh89tTfCD/CRdSOm13mOBa42iTaTyfyEWBdKcGdPxPtLFBXuHR8XHb33YNYaP+lLbmSvBTsnoesCNJEsQ==", + "dev": true, + "dependencies": { + "abab": "^2.0.6", + "acorn": "^8.8.1", + "acorn-globals": "^7.0.0", + "cssom": "^0.5.0", + "cssstyle": "^2.3.0", + "data-urls": "^3.0.2", + "decimal.js": "^10.4.2", + "domexception": "^4.0.0", + "escodegen": "^2.0.0", + "form-data": "^4.0.0", + "html-encoding-sniffer": "^3.0.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.1", + "is-potential-custom-element-name": "^1.0.1", + "nwsapi": "^2.2.2", + "parse5": "^7.1.1", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^4.1.2", + "w3c-xmlserializer": "^4.0.0", + "webidl-conversions": "^7.0.0", + "whatwg-encoding": "^2.0.0", + "whatwg-mimetype": "^3.0.0", + "whatwg-url": "^11.0.0", + "ws": "^8.11.0", + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "canvas": "^2.5.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jsesc": { + 
"version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "dev": true + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + 
"node_modules/make-dir/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-dir/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": 
"https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node_modules/node-releases": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", + "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==", + "dev": true + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nwsapi": { + "version": "2.2.7", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.7.tgz", + "integrity": "sha512-ub5E4+FBPKwAZx0UwIQOjYWGHTEq5sPqHQNRN8Z9e4A7u3Tj1weLJsL59yH9vmvqEtBHaOmT6cYQKIZOxp35FQ==", + "dev": true + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" 
+ } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dev": true, + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": 
"sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/psl": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", + "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", + "dev": true + }, + "node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.4.tgz", + "integrity": "sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true + }, + "node_modules/react-is": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", + "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", + "dev": true + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", + "dev": true + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dev": true, + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", + "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==", + "dev": true + }, + 
"node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dev": true, + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dev": true, + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dev": true, + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true + }, + "node_modules/resolve": { + "version": "1.22.6", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.6.tgz", + "integrity": "sha512-njhxM7mV12JfufShqGy3Rz8j11RPdLy4xi15UurGJeoHLfJpVXKdh3ueuOqbYUcDZnffr6X739JBo5LzyahEsw==", + "dev": true, + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", + "integrity": 
"sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "node_modules/stack-utils": { + "version": "2.0.6", + 
"resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true + }, + 
"node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tough-cookie": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", + "dev": true, + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tr46": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", + "integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==", + "dev": true, + "dependencies": { + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dev": true, + "dependencies": { + 
"unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, + "node_modules/v8-to-istanbul": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.3.tgz", + "integrity": "sha512-9lDD+EVI2fjFsMWXc6dy5JJzBsVTcQ2fVkfBvncZ6xJWG9wtBhOldG+mHkSL0+V1K/xgZz0JDO5UT5hFwHUghg==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/w3c-xmlserializer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-4.0.0.tgz", + "integrity": "sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw==", + "dev": true, + "dependencies": { + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/webidl-conversions": { + "version": "7.0.0", + "resolved": 
"https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-encoding": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", + "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "dev": true, + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-mimetype": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz", + "integrity": "sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-url": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz", + "integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==", + "dev": true, + "dependencies": { + "tr46": "^3.0.0", + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/ws": { + "version": "8.14.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz", + "integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==", + "dev": true, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", + "integrity": 
"sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/tests-ui/package.json b/tests-ui/package.json new file mode 100644 index 000000000..e7b60ad8e --- /dev/null +++ b/tests-ui/package.json @@ -0,0 +1,30 @@ +{ + "name": "comfui-tests", + "version": "1.0.0", + "description": "UI tests", + "main": "index.js", + "scripts": { + "test": "jest", + "test:generate": "node setup.js" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/comfyanonymous/ComfyUI.git" + }, + "keywords": [ + "comfyui", + "test" + ], + "author": "comfyanonymous", + "license": "GPL-3.0", + "bugs": { + "url": "https://github.com/comfyanonymous/ComfyUI/issues" + }, + "homepage": "https://github.com/comfyanonymous/ComfyUI#readme", + "devDependencies": { + "@babel/preset-env": "^7.22.20", + "@types/jest": "^29.5.5", + "jest": "^29.7.0", + "jest-environment-jsdom": "^29.7.0" + } +} diff --git a/tests-ui/setup.js b/tests-ui/setup.js new file mode 100644 index 000000000..0f368ab22 --- /dev/null +++ b/tests-ui/setup.js @@ -0,0 +1,87 @@ +const { spawn } = require("child_process"); +const { resolve } = require("path"); +const { existsSync, mkdirSync, writeFileSync } = require("fs"); +const http = require("http"); + +async function setup() { + // Wait up to 30s for it to start + let success = false; + let child; + for (let i = 0; i < 30; i++) { + try { + await new Promise((res, rej) => { + http + .get("http://127.0.0.1:8188/object_info", (resp) => { + let 
data = ""; + resp.on("data", (chunk) => { + data += chunk; + }); + resp.on("end", () => { + // Modify the response data to add some checkpoints + const objectInfo = JSON.parse(data); + objectInfo.CheckpointLoaderSimple.input.required.ckpt_name[0] = ["model1.safetensors", "model2.ckpt"]; + + data = JSON.stringify(objectInfo, undefined, "\t"); + + const outDir = resolve("./data"); + if (!existsSync(outDir)) { + mkdirSync(outDir); + } + + const outPath = resolve(outDir, "object_info.json"); + console.log(`Writing ${Object.keys(objectInfo).length} nodes to ${outPath}`); + writeFileSync(outPath, data, { + encoding: "utf8", + }); + res(); + }); + }) + .on("error", rej); + }); + success = true; + break; + } catch (error) { + console.log(i + "/30", error); + if (i === 0) { + // Start the server on first iteration if it fails to connect + console.log("Starting ComfyUI server..."); + + let python = resolve("../../python_embeded/python.exe"); + let args; + let cwd; + if (existsSync(python)) { + args = ["-s", "ComfyUI/main.py"]; + cwd = "../.."; + } else { + python = "python"; + args = ["main.py"]; + cwd = ".."; + } + args.push("--cpu"); + console.log(python, ...args); + child = spawn(python, args, { cwd }); + child.on("error", (err) => { + console.log(`Server error (${err})`); + i = 30; + }); + child.on("exit", (code) => { + if (!success) { + console.log(`Server exited (${code})`); + i = 30; + } + }); + } + await new Promise((r) => { + setTimeout(r, 1000); + }); + } + } + + child?.kill(); + + if (!success) { + throw new Error("Waiting for server failed..."); + } +} + + setup(); \ No newline at end of file diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js new file mode 100644 index 000000000..022e54926 --- /dev/null +++ b/tests-ui/tests/widgetInputs.test.js @@ -0,0 +1,319 @@ +// @ts-check +/// + +const { start, makeNodeDef, checkBeforeAndAfterReload, assertNotNullOrUndefined } = require("../utils"); +const lg = require("../utils/litegraph"); + +/** + * @typedef { import("../utils/ezgraph") } Ez + * @typedef { ReturnType["ez"] } EzNodeFactory + */ + +/** + * @param { EzNodeFactory } ez + * @param { InstanceType } graph + * @param { InstanceType } input + * @param { string } widgetType + * @param { boolean } hasControlWidget + * @returns + */ +async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasControlWidget) { + // Connect to primitive and ensure its still connected after + let primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(input); + + await checkBeforeAndAfterReload(graph, async () => { + primitive = graph.find(primitive); + let { connections } = primitive.outputs[0]; + expect(connections).toHaveLength(1); + expect(connections[0].targetNode.id).toBe(input.node.node.id); + + // Ensure widget is correct type + const valueWidget = primitive.widgets.value; + expect(valueWidget.widget.type).toBe(widgetType); + + // Check if control_after_generate should be added + if (hasControlWidget) { + const controlWidget = primitive.widgets.control_after_generate; + expect(controlWidget.widget.type).toBe("combo"); + } + + // Ensure we dont have other widgets + expect(primitive.node.widgets).toHaveLength(1 + +!!hasControlWidget); + }); + + return primitive; +} + +describe("widget inputs", () => { + beforeEach(() => { + lg.setup(global); + }); + + afterEach(() => { + lg.teardown(global); + }); + + [ + { name: "int", type: "INT", widget: "number", control: true }, + { name: "float", type: "FLOAT", widget: "number", control: true }, + { name: 
"text", type: "STRING" }, + { + name: "customtext", + type: "STRING", + opt: { multiline: true }, + }, + { name: "toggle", type: "BOOLEAN" }, + { name: "combo", type: ["a", "b", "c"], control: true }, + ].forEach((c) => { + test(`widget conversion + primitive works on ${c.name}`, async () => { + const { ez, graph } = await start({ + mockNodeDefs: makeNodeDef("TestNode", { [c.name]: [c.type, c.opt ?? {}] }), + }); + + // Create test node and convert to input + const n = ez.TestNode(); + const w = n.widgets[c.name]; + w.convertToInput(); + expect(w.isConvertedToInput).toBeTruthy(); + const input = w.getConvertedInput(); + expect(input).toBeTruthy(); + + // @ts-ignore : input is valid here + await connectPrimitiveAndReload(ez, graph, input, c.widget ?? c.name, c.control); + }); + }); + + test("converted widget works after reload", async () => { + const { ez, graph } = await start(); + let n = ez.CheckpointLoaderSimple(); + + const inputCount = n.inputs.length; + + // Convert ckpt name to an input + n.widgets.ckpt_name.convertToInput(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeTruthy(); + expect(n.inputs.ckpt_name).toBeTruthy(); + expect(n.inputs.length).toEqual(inputCount + 1); + + // Convert back to widget and ensure input is removed + n.widgets.ckpt_name.convertToWidget(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeFalsy(); + expect(n.inputs.ckpt_name).toBeFalsy(); + expect(n.inputs.length).toEqual(inputCount); + + // Convert again and reload the graph to ensure it maintains state + n.widgets.ckpt_name.convertToInput(); + expect(n.inputs.length).toEqual(inputCount + 1); + + const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", true); + + // Disconnect & reconnect + primitive.outputs[0].connections[0].disconnect(); + let { connections } = primitive.outputs[0]; + expect(connections).toHaveLength(0); + + primitive.outputs[0].connectTo(n.inputs.ckpt_name); + ({ connections } = primitive.outputs[0]); + expect(connections).toHaveLength(1); + expect(connections[0].targetNode.id).toBe(n.node.id); + + // Convert back to widget and ensure input is removed + n.widgets.ckpt_name.convertToWidget(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeFalsy(); + expect(n.inputs.ckpt_name).toBeFalsy(); + expect(n.inputs.length).toEqual(inputCount); + }); + + test("converted widget works on clone", async () => { + const { graph, ez } = await start(); + let n = ez.CheckpointLoaderSimple(); + + // Convert the widget to an input + n.widgets.ckpt_name.convertToInput(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeTruthy(); + + // Clone the node + n.menu["Clone"].call(); + expect(graph.nodes).toHaveLength(2); + const clone = graph.nodes[1]; + expect(clone.id).not.toEqual(n.id); + + // Ensure the clone has an input + expect(clone.widgets.ckpt_name.isConvertedToInput).toBeTruthy(); + expect(clone.inputs.ckpt_name).toBeTruthy(); + + // Ensure primitive connects to both nodes + let primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(n.inputs.ckpt_name); + primitive.outputs[0].connectTo(clone.inputs.ckpt_name); + expect(primitive.outputs[0].connections).toHaveLength(2); + + // Convert back to widget and ensure input is removed + clone.widgets.ckpt_name.convertToWidget(); + expect(clone.widgets.ckpt_name.isConvertedToInput).toBeFalsy(); + expect(clone.inputs.ckpt_name).toBeFalsy(); + }); + + test("shows missing node error on custom node with converted input", async () => { + const { graph } = await start(); + + const dialogShow = 
+	test("shows missing node error on custom node with converted input", async () => {
+		const { graph } = await start();
+
+		const dialogShow = jest.spyOn(graph.app.ui.dialog, "show");
+
+		await graph.app.loadGraphData({
+			last_node_id: 3,
+			last_link_id: 4,
+			nodes: [
+				{
+					id: 1,
+					type: "TestNode",
+					pos: [41.87329101561909, 389.7381480823742],
+					size: { 0: 220, 1: 374 },
+					flags: {},
+					order: 1,
+					mode: 0,
+					inputs: [{ name: "test", type: "FLOAT", link: 4, widget: { name: "test" }, slot_index: 0 }],
+					outputs: [],
+					properties: { "Node name for S&R": "TestNode" },
+					widgets_values: [1],
+				},
+				{
+					id: 3,
+					type: "PrimitiveNode",
+					pos: [-312, 433],
+					size: { 0: 210, 1: 82 },
+					flags: {},
+					order: 0,
+					mode: 0,
+					outputs: [{ links: [4], widget: { name: "test" } }],
+					title: "test",
+					properties: {},
+				},
+			],
+			links: [[4, 3, 0, 1, 6, "FLOAT"]],
+			groups: [],
+			config: {},
+			extra: {},
+			version: 0.4,
+		});
+
+		expect(dialogShow).toBeCalledTimes(1);
+		expect(dialogShow.mock.calls[0][0]).toContain("the following node types were not found");
+		expect(dialogShow.mock.calls[0][0]).toContain("TestNode");
+	});
+
+	test("defaultInput widgets can be converted back to inputs", async () => {
+		const { graph, ez } = await start({
+			mockNodeDefs: makeNodeDef("TestNode", { example: ["INT", { defaultInput: true }] }),
+		});
+
+		// Create test node and ensure it starts as an input
+		let n = ez.TestNode();
+		let w = n.widgets.example;
+		expect(w.isConvertedToInput).toBeTruthy();
+		let input = w.getConvertedInput();
+		expect(input).toBeTruthy();
+
+		// Ensure it can be converted to
+		w.convertToWidget();
+		expect(w.isConvertedToInput).toBeFalsy();
+		expect(n.inputs.length).toEqual(0);
+		// and from
+		w.convertToInput();
+		expect(w.isConvertedToInput).toBeTruthy();
+		input = w.getConvertedInput();
+
+		// Reload and ensure it still only has 1 converted widget
+		if (!assertNotNullOrUndefined(input)) return;
+
+		await connectPrimitiveAndReload(ez, graph, input, "number", true);
+		n = graph.find(n);
+		expect(n.widgets).toHaveLength(1);
+		w = n.widgets.example;
+		expect(w.isConvertedToInput).toBeTruthy();
+
+		// Convert back to widget and ensure it is still a widget after reload
+		w.convertToWidget();
+		await graph.reload();
+		n = graph.find(n);
+		expect(n.widgets).toHaveLength(1);
+		expect(n.widgets[0].isConvertedToInput).toBeFalsy();
+		expect(n.inputs.length).toEqual(0);
+	});
+
+	test("forceInput widgets can not be converted back to inputs", async () => {
+		const { graph, ez } = await start({
+			mockNodeDefs: makeNodeDef("TestNode", { example: ["INT", { forceInput: true }] }),
+		});
+
+		// Create test node and ensure it starts as an input
+		let n = ez.TestNode();
+		let w = n.widgets.example;
+		expect(w.isConvertedToInput).toBeTruthy();
+		const input = w.getConvertedInput();
+		expect(input).toBeTruthy();
+
+		// Convert to widget should error
+		expect(() => w.convertToWidget()).toThrow();
+
+		// Reload and ensure it still only has 1 converted widget
+		if (assertNotNullOrUndefined(input)) {
+			await connectPrimitiveAndReload(ez, graph, input, "number", true);
+			n = graph.find(n);
+			expect(n.widgets).toHaveLength(1);
+			expect(n.widgets.example.isConvertedToInput).toBeTruthy();
+		}
+	});
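+
+	// A primitive connected to converted combo widgets must agree on the exact option list; mismatched lists are rejected.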
p.outputs[0].connectTo(n2.inputs[0]); + expect(p.outputs[0].connections).toHaveLength(2); + const valueWidget = p.widgets.value; + expect(valueWidget.widget.type).toBe("combo"); + expect(valueWidget.widget.options.values).toEqual(["A", "B", "C"]); + }); + + test("primitive can not connect to non matching combos on converted widgets", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", { example: [["A", "B", "C"], { forceInput: true }] }), + ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true }] }), + }, + }); + + const n1 = ez.TestNode1(); + const n2 = ez.TestNode2(); + const p = ez.PrimitiveNode(); + p.outputs[0].connectTo(n1.inputs[0]); + expect(() => p.outputs[0].connectTo(n2.inputs[0])).toThrow(); + expect(p.outputs[0].connections).toHaveLength(1); + }); + + test("combo output can not connect to non matching combos list input", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", {}, [["A", "B"]]), + ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true}] }), + ...makeNodeDef("TestNode3", { example: [["A", "B", "C"], { forceInput: true}] }), + }, + }); + + const n1 = ez.TestNode1(); + const n2 = ez.TestNode2(); + const n3 = ez.TestNode3(); + + n1.outputs[0].connectTo(n2.inputs[0]); + expect(() => n1.outputs[0].connectTo(n3.inputs[0])).toThrow(); + }); +}); diff --git a/tests-ui/utils/ezgraph.js b/tests-ui/utils/ezgraph.js new file mode 100644 index 000000000..0e81fd47b --- /dev/null +++ b/tests-ui/utils/ezgraph.js @@ -0,0 +1,417 @@ +// @ts-check +/// + +/** + * @typedef { import("../../web/scripts/app")["app"] } app + * @typedef { import("../../web/types/litegraph") } LG + * @typedef { import("../../web/types/litegraph").IWidget } IWidget + * @typedef { import("../../web/types/litegraph").ContextMenuItem } ContextMenuItem + * @typedef { import("../../web/types/litegraph").INodeInputSlot } INodeInputSlot + * @typedef { import("../../web/types/litegraph").INodeOutputSlot } INodeOutputSlot + * @typedef { InstanceType & { widgets?: Array } } LGNode + * @typedef { (...args: EzOutput[] | [...EzOutput[], Record]) => EzNode } EzNodeFactory + */ + +export class EzConnection { + /** @type { app } */ + app; + /** @type { InstanceType } */ + link; + + get originNode() { + return new EzNode(this.app, this.app.graph.getNodeById(this.link.origin_id)); + } + + get originOutput() { + return this.originNode.outputs[this.link.origin_slot]; + } + + get targetNode() { + return new EzNode(this.app, this.app.graph.getNodeById(this.link.target_id)); + } + + get targetInput() { + return this.targetNode.inputs[this.link.target_slot]; + } + + /** + * @param { app } app + * @param { InstanceType } link + */ + constructor(app, link) { + this.app = app; + this.link = link; + } + + disconnect() { + this.targetInput.disconnect(); + } +} + +export class EzSlot { + /** @type { EzNode } */ + node; + /** @type { number } */ + index; + + /** + * @param { EzNode } node + * @param { number } index + */ + constructor(node, index) { + this.node = node; + this.index = index; + } +} + +export class EzInput extends EzSlot { + /** @type { INodeInputSlot } */ + input; + + /** + * @param { EzNode } node + * @param { number } index + * @param { INodeInputSlot } input + */ + constructor(node, index, input) { + super(node, index); + this.input = input; + } + + disconnect() { + this.node.node.disconnectInput(this.index); + } +} + +export class EzOutput extends EzSlot { + /** @type { INodeOutputSlot } */ + output; + + /** 
+ * @param { EzNode } node + * @param { number } index + * @param { INodeOutputSlot } output + */ + constructor(node, index, output) { + super(node, index); + this.output = output; + } + + get connections() { + return (this.node.node.outputs?.[this.index]?.links ?? []).map( + (l) => new EzConnection(this.node.app, this.node.app.graph.links[l]) + ); + } + + /** + * @param { EzInput } input + */ + connectTo(input) { + if (!input) throw new Error("Invalid input"); + + /** + * @type { LG["LLink"] | null } + */ + const link = this.node.node.connect(this.index, input.node.node, input.index); + if (!link) { + const inp = input.input; + const inName = inp.name || inp.label || inp.type; + throw new Error( + `Connecting from ${input.node.node.type}[${inName}#${input.index}] -> ${this.node.node.type}[${ + this.output.name ?? this.output.type + }#${this.index}] failed.` + ); + } + return link; + } +} + +export class EzNodeMenuItem { + /** @type { EzNode } */ + node; + /** @type { number } */ + index; + /** @type { ContextMenuItem } */ + item; + + /** + * @param { EzNode } node + * @param { number } index + * @param { ContextMenuItem } item + */ + constructor(node, index, item) { + this.node = node; + this.index = index; + this.item = item; + } + + call(selectNode = true) { + if (!this.item?.callback) throw new Error(`Menu Item ${this.item?.content ?? "[null]"} has no callback.`); + if (selectNode) { + this.node.select(); + } + this.item.callback.call(this.node.node, undefined, undefined, undefined, undefined, this.node.node); + } +} + +export class EzWidget { + /** @type { EzNode } */ + node; + /** @type { number } */ + index; + /** @type { IWidget } */ + widget; + + /** + * @param { EzNode } node + * @param { number } index + * @param { IWidget } widget + */ + constructor(node, index, widget) { + this.node = node; + this.index = index; + this.widget = widget; + } + + get value() { + return this.widget.value; + } + + set value(v) { + this.widget.value = v; + } + + get isConvertedToInput() { + // @ts-ignore : this type is valid for converted widgets + return this.widget.type === "converted-widget"; + } + + getConvertedInput() { + if (!this.isConvertedToInput) throw new Error(`Widget ${this.widget.name} is not converted to input.`); + + return this.node.inputs.find((inp) => inp.input["widget"]?.name === this.widget.name); + } + + convertToWidget() { + if (!this.isConvertedToInput) + throw new Error(`Widget ${this.widget.name} cannot be converted as it is already a widget.`); + this.node.menu[`Convert ${this.widget.name} to widget`].call(); + } + + convertToInput() { + if (this.isConvertedToInput) + throw new Error(`Widget ${this.widget.name} cannot be converted as it is already an input.`); + this.node.menu[`Convert ${this.widget.name} to input`].call(); + } +} + +export class EzNode { + /** @type { app } */ + app; + /** @type { LGNode } */ + node; + + /** + * @param { app } app + * @param { LGNode } node + */ + constructor(app, node) { + this.app = app; + this.node = node; + } + + get id() { + return this.node.id; + } + + get inputs() { + return this.#makeLookupArray("inputs", "name", EzInput); + } + + get outputs() { + return this.#makeLookupArray("outputs", "name", EzOutput); + } + + get widgets() { + return this.#makeLookupArray("widgets", "name", EzWidget); + } + + get menu() { + return this.#makeLookupArray(() => this.app.canvas.getNodeMenuOptions(this.node), "content", EzNodeMenuItem); + } + + select() { + this.app.canvas.selectNode(this.node); + } + + // /** + // * @template { "inputs" | 
"outputs" } T + // * @param { T } type + // * @returns { Record & (type extends "inputs" ? EzInput [] : EzOutput[]) } + // */ + // #getSlotItems(type) { + // // @ts-ignore : these items are correct + // return (this.node[type] ?? []).reduce((p, s, i) => { + // if (s.name in p) { + // throw new Error(`Unable to store input ${s.name} on array as name conflicts.`); + // } + // // @ts-ignore + // p.push((p[s.name] = new (type === "inputs" ? EzInput : EzOutput)(this, i, s))); + // return p; + // }, Object.assign([], { $: this })); + // } + + /** + * @template { { new(node: EzNode, index: number, obj: any): any } } T + * @param { "inputs" | "outputs" | "widgets" | (() => Array) } nodeProperty + * @param { string } nameProperty + * @param { T } ctor + * @returns { Record> & Array> } + */ + #makeLookupArray(nodeProperty, nameProperty, ctor) { + const items = typeof nodeProperty === "function" ? nodeProperty() : this.node[nodeProperty]; + // @ts-ignore + return (items ?? []).reduce((p, s, i) => { + if (!s) return p; + + const name = s[nameProperty]; + // @ts-ignore + if (!name || name in p) { + throw new Error(`Unable to store ${nodeProperty} ${name} on array as name conflicts.`); + } + // @ts-ignore + p.push((p[name] = new ctor(this, i, s))); + return p; + }, Object.assign([], { $: this })); + } +} + +export class EzGraph { + /** @type { app } */ + app; + + /** + * @param { app } app + */ + constructor(app) { + this.app = app; + } + + get nodes() { + return this.app.graph._nodes.map((n) => new EzNode(this.app, n)); + } + + clear() { + this.app.graph.clear(); + } + + arrange() { + this.app.graph.arrange(); + } + + stringify() { + return JSON.stringify(this.app.graph.serialize(), undefined, "\t"); + } + + /** + * @param { number | LGNode | EzNode } obj + * @returns { EzNode } + */ + find(obj) { + let match; + let id; + if (typeof obj === "number") { + id = obj; + } else { + id = obj.id; + } + + match = this.app.graph.getNodeById(id); + + if (!match) { + throw new Error(`Unable to find node with ID ${id}.`); + } + + return new EzNode(this.app, match); + } + + /** + * @returns { Promise } + */ + reload() { + const graph = JSON.parse(JSON.stringify(this.app.graph.serialize())); + return new Promise((r) => { + this.app.graph.clear(); + setTimeout(async () => { + await this.app.loadGraphData(graph); + r(); + }, 10); + }); + } +} + +export const Ez = { + /** + * Quickly build and interact with a ComfyUI graph + * @example + * const { ez, graph } = Ez.graph(app); + * graph.clear(); + * const [model, clip, vae] = ez.CheckpointLoaderSimple(); + * const [pos] = ez.CLIPTextEncode(clip, { text: "positive" }); + * const [neg] = ez.CLIPTextEncode(clip, { text: "negative" }); + * const [latent] = ez.KSampler(model, pos, neg, ...ez.EmptyLatentImage()); + * const [image] = ez.VAEDecode(latent, vae); + * const saveNode = ez.SaveImage(image).node; + * console.log(saveNode); + * graph.arrange(); + * @param { app } app + * @param { LG["LiteGraph"] } LiteGraph + * @param { LG["LGraphCanvas"] } LGraphCanvas + * @param { boolean } clearGraph + * @returns { { graph: EzGraph, ez: Record } } + */ + graph(app, LiteGraph = window["LiteGraph"], LGraphCanvas = window["LGraphCanvas"], clearGraph = true) { + // Always set the active canvas so things work + LGraphCanvas.active_canvas = app.canvas; + + if (clearGraph) { + app.graph.clear(); + } + + // @ts-ignore : this proxy handles utility methods & node creation + const factory = new Proxy( + {}, + { + get(_, p) { + if (typeof p !== "string") throw new Error("Invalid node"); + 
const node = LiteGraph.createNode(p); + if (!node) throw new Error(`Unknown node "${p}"`); + app.graph.add(node); + + /** + * @param {Parameters} args + */ + return function (...args) { + const ezNode = new EzNode(app, node); + const inputs = ezNode.inputs; + + let slot = 0; + for (const arg of args) { + if (arg instanceof EzOutput) { + arg.connectTo(inputs[slot++]); + } else { + for (const k in arg) { + ezNode.widgets[k].value = arg[k]; + } + } + } + + return ezNode; + }; + }, + } + ); + + return { graph: new EzGraph(app), ez: factory }; + }, +}; diff --git a/tests-ui/utils/index.js b/tests-ui/utils/index.js new file mode 100644 index 000000000..01c58b21f --- /dev/null +++ b/tests-ui/utils/index.js @@ -0,0 +1,71 @@ +const { mockApi } = require("./setup"); +const { Ez } = require("./ezgraph"); + +/** + * + * @param { Parameters[0] } config + * @returns + */ +export async function start(config = undefined) { + mockApi(config); + const { app } = require("../../web/scripts/app"); + await app.setup(); + return Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]); +} + +/** + * @param { ReturnType["graph"] } graph + * @param { (hasReloaded: boolean) => (Promise | void) } cb + */ +export async function checkBeforeAndAfterReload(graph, cb) { + await cb(false); + await graph.reload(); + await cb(true); +} + +/** + * @param { string } name + * @param { Record } input + * @param { (string | string[])[] | Record } output + * @returns { Record } + */ +export function makeNodeDef(name, input, output = {}) { + const nodeDef = { + name, + category: "test", + output: [], + output_name: [], + output_is_list: [], + input: { + required: {} + }, + }; + for(const k in input) { + nodeDef.input.required[k] = typeof input[k] === "string" ? [input[k], {}] : [...input[k]]; + } + if(output instanceof Array) { + output = output.reduce((p, c) => { + p[c] = c; + return p; + }, {}) + } + for(const k in output) { + nodeDef.output.push(output[k]); + nodeDef.output_name.push(k); + nodeDef.output_is_list.push(false); + } + + return { [name]: nodeDef }; +} + +/** +/** + * @template { any } T + * @param { T } x + * @returns { x is Exclude } + */ +export function assertNotNullOrUndefined(x) { + expect(x).not.toEqual(null); + expect(x).not.toEqual(undefined); + return true; +} \ No newline at end of file diff --git a/tests-ui/utils/litegraph.js b/tests-ui/utils/litegraph.js new file mode 100644 index 000000000..777f8c3ba --- /dev/null +++ b/tests-ui/utils/litegraph.js @@ -0,0 +1,36 @@ +const fs = require("fs"); +const path = require("path"); +const { nop } = require("../utils/nopProxy"); + +function forEachKey(cb) { + for (const k of [ + "LiteGraph", + "LGraph", + "LLink", + "LGraphNode", + "LGraphGroup", + "DragAndScale", + "LGraphCanvas", + "ContextMenu", + ]) { + cb(k); + } +} + +export function setup(ctx) { + const lg = fs.readFileSync(path.resolve("../web/lib/litegraph.core.js"), "utf-8"); + const globalTemp = {}; + (function (console) { + eval(lg); + }).call(globalTemp, nop); + + forEachKey((k) => (ctx[k] = globalTemp[k])); + require(path.resolve("../web/lib/litegraph.extensions.js")); +} + +export function teardown(ctx) { + forEachKey((k) => delete ctx[k]); + + // Clear document after each run + document.getElementsByTagName("html")[0].innerHTML = ""; +} diff --git a/tests-ui/utils/nopProxy.js b/tests-ui/utils/nopProxy.js new file mode 100644 index 000000000..2502d9d03 --- /dev/null +++ b/tests-ui/utils/nopProxy.js @@ -0,0 +1,6 @@ +export const nop = new Proxy(function () {}, { + get: () => nop, + set: () => true, 
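+	// calling or constructing the proxy also resolves back to the proxy,
+	// so chained expressions like nop.foo().bar are swallowed harmlessly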
+	apply: () => nop,
+	construct: () => nop,
+});
diff --git a/tests-ui/utils/setup.js b/tests-ui/utils/setup.js
new file mode 100644
index 000000000..17e8ac1ad
--- /dev/null
+++ b/tests-ui/utils/setup.js
@@ -0,0 +1,45 @@
+require("../../web/scripts/api");
+
+const fs = require("fs");
+const path = require("path");
+function* walkSync(dir) {
+	const files = fs.readdirSync(dir, { withFileTypes: true });
+	for (const file of files) {
+		if (file.isDirectory()) {
+			yield* walkSync(path.join(dir, file.name));
+		} else {
+			yield path.join(dir, file.name);
+		}
+	}
+}
+
+/**
+ * @typedef { import("../../web/types/comfy").ComfyObjectInfo } ComfyObjectInfo
+ */
+
+/**
+ * @param { { mockExtensions?: string[], mockNodeDefs?: Record<string, ComfyObjectInfo> } } config
+ */
+export function mockApi({ mockExtensions, mockNodeDefs } = {}) {
+	if (!mockExtensions) {
+		mockExtensions = Array.from(walkSync(path.resolve("../web/extensions/core")))
+			.filter((x) => x.endsWith(".js"))
+			.map((x) => path.relative(path.resolve("../web"), x));
+	}
+	if (!mockNodeDefs) {
+		mockNodeDefs = JSON.parse(fs.readFileSync(path.resolve("./data/object_info.json")));
+	}
+
+	jest.mock("../../web/scripts/api", () => ({
+		get api() {
+			return {
+				addEventListener: jest.fn(),
+				getSystemStats: jest.fn(),
+				getExtensions: jest.fn(() => mockExtensions),
+				getNodeDefs: jest.fn(() => mockNodeDefs),
+				init: jest.fn(),
+				apiURL: jest.fn((x) => "../../web/" + x),
+			};
+		},
+	}));
+}
diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js
index ce05a29e9..84abd8b7d 100644
--- a/web/extensions/core/widgetInputs.js
+++ b/web/extensions/core/widgetInputs.js
@@ -100,6 +100,27 @@ function getWidgetType(config) {
 	return { type };
 }
 
+
+function isValidCombo(combo, obj) {
+	// New input isn't a combo
+	if (!(obj instanceof Array)) {
+		console.log(`connection rejected: tried to connect combo to ${obj}`);
+		return false;
+	}
+	// New input combo has a different size
+	if (combo.length !== obj.length) {
+		console.log(`connection rejected: combo lists don't match`);
+		return false;
+	}
+	// New input combo has different elements
+	if (combo.find((v, i) => obj[i] !== v)) {
+		console.log(`connection rejected: combo lists don't match`);
+		return false;
+	}
+
+	return true;
+}
+
 app.registerExtension({
 	name: "Comfy.WidgetInputs",
 	async beforeRegisterNodeDef(nodeType, nodeData, app) {
@@ -256,6 +277,28 @@ app.registerExtension({
 			return r;
 		};
+
+		// Prevent connecting COMBO lists to converted inputs whose types don't match
+		const onConnectInput = nodeType.prototype.onConnectInput;
+		nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) {
+			const v = onConnectInput?.apply(this, arguments);
+			// Not a combo, ignore
+			if (type !== "COMBO") return v;
+			// Primitive output, let the primitive node handle it
+			if (originNode.outputs[originSlot].widget) return v;
+
+			// Ensure target is also a combo
+			const targetCombo = this.inputs[targetSlot].widget?.[GET_CONFIG]?.()?.[0];
+			if (!targetCombo || !(targetCombo instanceof Array)) return v;
+
+			// Check they match
+			const originConfig = originNode.constructor?.nodeData?.output?.[originSlot];
+			if (!originConfig || !isValidCombo(targetCombo, originConfig)) {
+				return false;
+			}
+
+			return v;
+		};
 	},
 	registerCustomNodes() {
 		class PrimitiveNode {
@@ -315,7 +358,7 @@ app.registerExtension({
 		onAfterGraphConfigured() {
 			if (this.outputs[0].links?.length && !this.widgets?.length) {
-				this.#onFirstConnection();
+				if (!this.#onFirstConnection()) return;
 
 				// Populate widget values from config data
if (this.widgets) { @@ -386,13 +429,16 @@ app.registerExtension({ widget = input.widget; } - const { type } = getWidgetType(widget[GET_CONFIG]()); + const config = widget[GET_CONFIG]?.(); + if (!config) return; + + const { type } = getWidgetType(config); // Update our output to restrict to the widget type this.outputs[0].type = type; this.outputs[0].name = type; this.outputs[0].widget = widget; - this.#createWidget(widget[CONFIG] ?? widget[GET_CONFIG](), theirNode, widget.name, recreating); + this.#createWidget(widget[CONFIG] ?? config, theirNode, widget.name, recreating); } #createWidget(inputData, node, widgetName, recreating) { @@ -497,21 +543,7 @@ app.registerExtension({ const config2 = input.widget[GET_CONFIG](); if (config1[0] instanceof Array) { - // New input isnt a combo - if (!(config2[0] instanceof Array)) { - console.log(`connection rejected: tried to connect combo to ${config2[0]}`); - return false; - } - // New imput combo has a different size - if (config1[0].length !== config2[0].length) { - console.log(`connection rejected: combo lists dont match`); - return false; - } - // New input combo has different elements - if (config1[0].find((v, i) => config2[0][i] !== v)) { - console.log(`connection rejected: combo lists dont match`); - return false; - } + if (!isValidCombo(config1[0], config2[0])) return false; } else if (config1[0] !== config2[0]) { // Types dont match console.log(`connection rejected: types dont match`, config1[0], config2[0]); From 25e3e5af6850119c506efc49ab0364b5bb9aa0d0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 22:52:12 -0400 Subject: [PATCH 013/170] Use npm ci for ci instead of npm install in tests. --- .github/workflows/test-ui.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml index 62b4c35f6..292ff5c63 100644 --- a/.github/workflows/test-ui.yaml +++ b/.github/workflows/test-ui.yaml @@ -19,7 +19,7 @@ jobs: pip install -r requirements.txt - name: Run Tests run: | - npm install + npm ci npm run test:generate npm test working-directory: ./tests-ui From e0c0029fc1e76beed9dd61176b33fc25796a7d57 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 23:00:05 -0400 Subject: [PATCH 014/170] Try to speed up the test-ui workflow. --- .github/workflows/test-ui.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml index 292ff5c63..3e96ac18f 100644 --- a/.github/workflows/test-ui.yaml +++ b/.github/workflows/test-ui.yaml @@ -16,7 +16,7 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip - pip install -r requirements.txt + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu -r requirements.txt - name: Run Tests run: | npm ci From 77c893350a7d9b28c25356f90a0ba9981b3771f9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 23:13:54 -0400 Subject: [PATCH 015/170] Fix previous commit that broke tests. 
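
The combined install from the previous commit pointed every package at the
PyTorch CPU wheel index, which only hosts the torch family of packages, so
the rest of requirements.txt likely failed to resolve. Splitting the install
into two steps (as in the diff below) keeps the CPU wheels while letting the
remaining requirements come from the default index:

    pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
    pip install -r requirements.txt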
--- .github/workflows/test-ui.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml index 3e96ac18f..950691755 100644 --- a/.github/workflows/test-ui.yaml +++ b/.github/workflows/test-ui.yaml @@ -16,7 +16,8 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip - pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu -r requirements.txt + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu + pip install -r requirements.txt - name: Run Tests run: | npm ci From 1443caf373c704244b11eb4113af68d353c741d4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 05:16:38 -0400 Subject: [PATCH 016/170] HyperTile node, can be found in: _for_testing->HyperTile --- comfy_extras/nodes_hypertile.py | 83 +++++++++++++++++++++++++++++++++ nodes.py | 3 +- 2 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_hypertile.py diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py new file mode 100644 index 000000000..0d7d4c954 --- /dev/null +++ b/comfy_extras/nodes_hypertile.py @@ -0,0 +1,83 @@ +#Taken from: https://github.com/tfernd/HyperTile/ + +import math +from einops import rearrange +import random + +def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int: + min_value = min(min_value, value) + + # All big divisors of value (inclusive) + divisors = [i for i in range(min_value, value + 1) if value % i == 0] + + ns = [value // i for i in divisors[:max_options]] # has at least 1 element + + random.seed(counter) + idx = random.randint(0, len(ns) - 1) + + return ns[idx] + +class HyperTile: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}), + "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}), + "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}), + "scale_depth": ("BOOLEAN", {"default": False}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, tile_size, swap_size, max_depth, scale_depth): + model_channels = model.model.model_config.unet_config["model_channels"] + + apply_to = set() + temp = model_channels + for x in range(max_depth + 1): + apply_to.add(temp) + temp *= 2 + + latent_tile_size = max(32, tile_size) // 8 + self.temp = None + self.counter = 1 + + def hypertile_in(q, k, v, extra_options): + if q.shape[-1] in apply_to: + shape = extra_options["original_shape"] + aspect_ratio = shape[-1] / shape[-2] + + hw = q.size(1) + h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) + + factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter) + self.counter += 1 + nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter) + self.counter += 1 + + if nh * nw > 1: + q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + self.temp = (nh, nw, h, w) + return q, k, v + + return q, k, v + def hypertile_out(out, extra_options): + if self.temp is not None: + nh, nw, h, w = self.temp + self.temp = None + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + return out + + + m = model.clone() + 
m.set_model_attn1_patch(hypertile_in) + m.set_model_attn1_output_patch(hypertile_out) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "HyperTile": HyperTile, +} diff --git a/nodes.py b/nodes.py index 0dbc2be32..61ebbb8b4 100644 --- a/nodes.py +++ b/nodes.py @@ -1796,7 +1796,8 @@ def init_custom_nodes(): "nodes_clip_sdxl.py", "nodes_canny.py", "nodes_freelunch.py", - "nodes_custom_sampler.py" + "nodes_custom_sampler.py", + "nodes_hypertile.py", ] for node_file in extras_files: From 9906e3efe31a0fc399262766da33c210fb4e8215 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 13:23:03 -0400 Subject: [PATCH 017/170] Make xformers work with hypertile. --- comfy/ldm/modules/attention.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 9cd14a537..4eda361f3 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -253,12 +253,14 @@ def attention_split(q, k, v, heads, mask=None): return r2 def attention_xformers(q, k, v, heads, mask=None): - b, _, _ = q.shape + b, _, dim_head = q.shape + dim_head //= heads + q, k, v = map( lambda t: t.unsqueeze(3) - .reshape(b, t.shape[1], heads, -1) + .reshape(b, -1, heads, dim_head) .permute(0, 2, 1, 3) - .reshape(b * heads, t.shape[1], -1) + .reshape(b * heads, -1, dim_head) .contiguous(), (q, k, v), ) @@ -270,9 +272,9 @@ def attention_xformers(q, k, v, heads, mask=None): raise NotImplementedError out = ( out.unsqueeze(0) - .reshape(b, heads, out.shape[1], -1) + .reshape(b, heads, -1, dim_head) .permute(0, 2, 1, 3) - .reshape(b, out.shape[1], -1) + .reshape(b, -1, heads * dim_head) ) return out From a0690f9df9e731ff31fb9b0d64f1fe7cbc918789 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 20:31:24 -0400 Subject: [PATCH 018/170] Fix t2i adapter issue. --- comfy/controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 73a40acfa..f1355e64e 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -416,7 +416,7 @@ class T2IAdapter(ControlBase): if control_prev is not None: return control_prev else: - return {} + return None if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]: if self.cond_hint is not None: From 8cfce083c4eb09ea95bce59f65f1634e09d12b13 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 22:36:04 -0400 Subject: [PATCH 019/170] Fix primitive node control value not getting loaded. --- web/extensions/core/widgetInputs.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 84abd8b7d..bad3ac3a7 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -463,7 +463,11 @@ app.registerExtension({ } if (widget.type === "number" || widget.type === "combo") { - addValueControlWidget(this, widget, "fixed"); + let control_value = this.widgets_values?.[1]; + if (!control_value) { + control_value = "fixed"; + } + addValueControlWidget(this, widget, control_value); } // When our value changes, update other widgets to reflect our changes From e6bc42df4662e571365ffbafe7c2dfac2cee3116 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 03:51:29 -0400 Subject: [PATCH 020/170] Make sub_quad and split work with hypertile. 
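
HyperTile rewrites the token dimension of q/k/v, so backends can no longer
infer the head size from a fixed sequence shape. Both backends below now
derive dim_head from the channel dimension and fold the heads into the
batch. A minimal standalone sketch of that reshape (illustration, not part
of the patch):

    import torch

    def split_heads(t, heads):
        # (batch, tokens, heads * dim_head) -> (batch * heads, tokens, dim_head)
        b, _, dim = t.shape
        dim_head = dim // heads
        return t.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)

Note that sub_quad keeps the key laid out transposed, permuting to
(batch * heads, dim_head, tokens), so the q @ k matmul needs no further
transpose.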
--- comfy/ldm/modules/attention.py | 41 ++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 4eda361f3..f8391e19a 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -124,11 +124,14 @@ def attention_basic(q, k, v, heads, mask=None): def attention_sub_quad(query, key, value, heads, mask=None): - scale = (query.shape[-1] // heads) ** -0.5 - query = query.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1) - key_t = key.transpose(1,2).unflatten(1, (heads, -1)).flatten(end_dim=1) - del key - value = value.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1) + b, _, dim_head = query.shape + dim_head //= heads + + scale = dim_head ** -0.5 + query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head) + value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head) + + key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1) dtype = query.dtype upcast_attention = _ATTN_PRECISION =="fp32" and query.dtype != torch.float32 @@ -137,7 +140,7 @@ def attention_sub_quad(query, key, value, heads, mask=None): else: bytes_per_token = torch.finfo(query.dtype).bits//8 batch_x_heads, q_tokens, _ = query.shape - _, _, k_tokens = key_t.shape + _, _, k_tokens = key.shape qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True) @@ -171,7 +174,7 @@ def attention_sub_quad(query, key, value, heads, mask=None): hidden_states = efficient_dot_product_attention( query, - key_t, + key, value, query_chunk_size=query_chunk_size, kv_chunk_size=kv_chunk_size, @@ -186,9 +189,19 @@ def attention_sub_quad(query, key, value, heads, mask=None): return hidden_states def attention_split(q, k, v, heads, mask=None): - scale = (q.shape[-1] // heads) ** -0.5 + b, _, dim_head = q.shape + dim_head //= heads + scale = dim_head ** -0.5 + h = heads - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, -1, heads, dim_head) + .permute(0, 2, 1, 3) + .reshape(b * heads, -1, dim_head) + .contiguous(), + (q, k, v), + ) r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) @@ -248,9 +261,13 @@ def attention_split(q, k, v, heads, mask=None): del q, k, v - r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) - del r1 - return r2 + r1 = ( + r1.unsqueeze(0) + .reshape(b, heads, -1, dim_head) + .permute(0, 2, 1, 3) + .reshape(b, -1, heads * dim_head) + ) + return r1 def attention_xformers(q, k, v, heads, mask=None): b, _, dim_head = q.shape From 8b65f5de54426f25cc7c08928332e5b7bf0fd25f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 03:59:53 -0400 Subject: [PATCH 021/170] attention_basic now works with hypertile. 
--- comfy/ldm/modules/attention.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index f8391e19a..dcf467489 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -95,9 +95,19 @@ def Normalize(in_channels, dtype=None, device=None): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) def attention_basic(q, k, v, heads, mask=None): + b, _, dim_head = q.shape + dim_head //= heads + scale = dim_head ** -0.5 + h = heads - scale = (q.shape[-1] // heads) ** -0.5 - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, -1, heads, dim_head) + .permute(0, 2, 1, 3) + .reshape(b * heads, -1, dim_head) + .contiguous(), + (q, k, v), + ) # force cast to fp32 to avoid overflowing if _ATTN_PRECISION =="fp32": @@ -119,7 +129,12 @@ def attention_basic(q, k, v, heads, mask=None): sim = sim.softmax(dim=-1) out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + out = ( + out.unsqueeze(0) + .reshape(b, heads, -1, dim_head) + .permute(0, 2, 1, 3) + .reshape(b, -1, heads * dim_head) + ) return out From 8594c8be4d8c0d7c9b5eb3d69d0c96cc80cffcc4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 13:53:59 -0400 Subject: [PATCH 022/170] Empty the cache when torch cache is more than 25% free mem. --- comfy/model_management.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 64ed19727..53582fc73 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -339,7 +339,11 @@ def free_memory(memory_required, device, keep_loaded=[]): if unloaded_model: soft_empty_cache() - + else: + if vram_state != VRAMState.HIGH_VRAM: + mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True) + if mem_free_torch > mem_free_total * 0.25: + soft_empty_cache() def load_models_gpu(models, memory_required=0): global vram_state From 2ec6158e9e10cc5e1cc4b27b2930b75167db20de Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 23:38:18 -0400 Subject: [PATCH 023/170] Call widget callback on value control to fix primitive node issue. --- web/scripts/widgets.js | 1 + 1 file changed, 1 insertion(+) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 2b0239374..2b6747769 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -84,6 +84,7 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random if (targetWidget.value > max) targetWidget.value = max; + targetWidget.callback(targetWidget.value); } } return valueControl; From b935bea3a0201221eca7b0337bc60a329871300a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 23 Oct 2023 21:13:50 -0400 Subject: [PATCH 024/170] The frontend can now load workflows from webp exif. 
--- web/scripts/app.js | 11 ++++- web/scripts/pnginfo.js | 103 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 1d3b573b1..fca5b5bd3 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -3,7 +3,7 @@ import { ComfyWidgets } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; -import { getPngMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; +import { getPngMetadata, getWebpMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; /** * @typedef {import("types/comfy").ComfyExtension} ComfyExtension @@ -1790,6 +1790,15 @@ export class ComfyApp { importA1111(this.graph, pngInfo.parameters); } } + } else if (file.type === "image/webp") { + const pngInfo = await getWebpMetadata(file); + if (pngInfo) { + if (pngInfo.workflow) { + this.loadGraphData(JSON.parse(pngInfo.workflow)); + } else if (pngInfo.Workflow) { + this.loadGraphData(JSON.parse(pngInfo.Workflow)); // Support loading workflows from that webp custom node. + } + } } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); reader.onload = () => { diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index c5293dfa3..42573daa0 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -47,6 +47,109 @@ export function getPngMetadata(file) { }); } +function parseExifData(exifData) { + // Check for the correct TIFF header (0x4949 for little-endian or 0x4D4D for big-endian) + const isLittleEndian = new Uint16Array(exifData.slice(0, 2))[0] === 0x4949; + console.log(exifData); + + // Function to read 16-bit and 32-bit integers from binary data + function readInt(offset, isLittleEndian, length) { + let arr = exifData.slice(offset, offset + length) + if (length === 2) { + return new DataView(arr.buffer, arr.byteOffset, arr.byteLength).getUint16(0, isLittleEndian); + } else if (length === 4) { + return new DataView(arr.buffer, arr.byteOffset, arr.byteLength).getUint32(0, isLittleEndian); + } + } + + // Read the offset to the first IFD (Image File Directory) + const ifdOffset = readInt(4, isLittleEndian, 4); + + function parseIFD(offset) { + const numEntries = readInt(offset, isLittleEndian, 2); + const result = {}; + + for (let i = 0; i < numEntries; i++) { + const entryOffset = offset + 2 + i * 12; + const tag = readInt(entryOffset, isLittleEndian, 2); + const type = readInt(entryOffset + 2, isLittleEndian, 2); + const numValues = readInt(entryOffset + 4, isLittleEndian, 4); + const valueOffset = readInt(entryOffset + 8, isLittleEndian, 4); + + // Read the value(s) based on the data type + let value; + if (type === 2) { + // ASCII string + value = String.fromCharCode(...exifData.slice(valueOffset, valueOffset + numValues - 1)); + } + + result[tag] = value; + } + + return result; + } + + // Parse the first IFD + const ifdData = parseIFD(ifdOffset); + return ifdData; +} + +function splitValues(input) { + var output = {}; + for (var key in input) { + var value = input[key]; + var splitValues = value.split(':', 2); + output[splitValues[0]] = splitValues[1]; + } + return output; +} + +export function getWebpMetadata(file) { + return new Promise((r) => { + const reader = new FileReader(); + reader.onload = (event) => { + // Get the PNG data as a Uint8Array + const pngData = new Uint8Array(event.target.result); + const dataView = new DataView(pngData.buffer); + + // 
Check that the PNG signature is present + if (dataView.getUint32(0) !== 0x52494646 || dataView.getUint32(8) !== 0x57454250) { + console.error("Not a valid WEBP file"); + r(); + return; + } + + // Start searching for chunks after the PNG signature + let offset = 12; + let txt_chunks = {}; + // Loop through the chunks in the PNG file + while (offset < pngData.length) { + // Get the length of the chunk + const length = dataView.getUint32(offset + 4, true); + // Get the chunk type + const type = String.fromCharCode(...pngData.slice(offset, offset + 4)); + console.log(length, type); + if (type === "EXIF") { + // Get the keyword + let data = parseExifData(pngData.slice(offset + 8, offset + 8 + length)); + for (var key in data) { + var value = data[key]; + let index = value.indexOf(':'); + txt_chunks[value.slice(0, index)] = value.slice(index + 1); + } + } + + offset += 8 + length; + } + + console.log(txt_chunks); + r(txt_chunks); + }; + + reader.readAsArrayBuffer(file); + }); +} + export function getLatentMetadata(file) { return new Promise((r) => { const reader = new FileReader(); From 5c65da312a69ddbc34a2a1384b1118fd4e21776e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 23 Oct 2023 23:39:22 -0400 Subject: [PATCH 025/170] Remove prints. --- web/scripts/pnginfo.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index 42573daa0..4dc3a032c 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -128,7 +128,6 @@ export function getWebpMetadata(file) { const length = dataView.getUint32(offset + 4, true); // Get the chunk type const type = String.fromCharCode(...pngData.slice(offset, offset + 4)); - console.log(length, type); if (type === "EXIF") { // Get the keyword let data = parseExifData(pngData.slice(offset + 8, offset + 8 + length)); @@ -142,7 +141,6 @@ export function getWebpMetadata(file) { offset += 8 + length; } - console.log(txt_chunks); r(txt_chunks); }; From 3fce8881ca0f24e268fac1dc6e85d2b4cbdb0355 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 24 Oct 2023 03:38:41 -0400 Subject: [PATCH 026/170] Sampling code refactor to make it easier to add more conds. 
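
Each cond is now wrapped in an object that knows whether it can share a
batch with another cond and how to concatenate. Cross-attention conds of
different token lengths are padded by repetition up to their least common
multiple (and refused beyond a 4x padding factor), which leaves attention
results unchanged. A standalone illustration of that padding:

    import torch

    a = torch.randn(1, 77, 768)    # one prompt embedding
    b = torch.randn(1, 154, 768)   # a longer one; lcm(77, 154) == 154
    a = a.repeat(1, 154 // a.shape[1], 1)  # repeating tokens doesn't change cross-attn output
    batch = torch.cat([a, b])      # (2, 154, 768), now batchable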
--- comfy/samplers.py | 109 ++++++++++++++++++++++++++++------------------ 1 file changed, 66 insertions(+), 43 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 0b38fbd1e..f88b790d8 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -9,9 +9,58 @@ import math from comfy import model_base import comfy.utils + def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) return abs(a*b) // math.gcd(a, b) +class CONDRegular: + def __init__(self, cond): + self.cond = cond + + def can_concat(self, other): + if self.cond.shape != other.cond.shape: + return False + return True + + def concat(self, others): + conds = [self.cond] + for x in others: + conds.append(x.cond) + return torch.cat(conds) + +class CONDCrossAttn: + def __init__(self, cond): + self.cond = cond + + def can_concat(self, other): + s1 = self.cond.shape + s2 = other.cond.shape + if s1 != s2: + if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen + return False + + mult_min = lcm(s1[1], s2[1]) + diff = mult_min // min(s1[1], s2[1]) + if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much + return False + return True + + def concat(self, others): + conds = [self.cond] + crossattn_max_len = self.cond.shape[1] + for x in others: + c = x.cond + crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) + conds.append(c) + + out = [] + for c in conds: + if c.shape[1] < crossattn_max_len: + c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result + out.append(c) + return torch.cat(out) + + #The main sampling function shared by all the samplers #Returns predicted noise def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): @@ -67,7 +116,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) conditionning = {} - conditionning['c_crossattn'] = cond[0] + conditionning['c_crossattn'] = CONDCrossAttn(cond[0]) if 'concat' in cond[1]: cond_concat_in = cond[1]['concat'] @@ -76,10 +125,10 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod for x in cond_concat_in: cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] cropped.append(cr) - conditionning['c_concat'] = torch.cat(cropped, dim=1) + conditionning['c_concat'] = CONDRegular(torch.cat(cropped, dim=1)) if adm_cond is not None: - conditionning['c_adm'] = adm_cond + conditionning['c_adm'] = CONDRegular(adm_cond) control = None if 'control' in cond[1]: @@ -105,22 +154,8 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod return True if c1.keys() != c2.keys(): return False - if 'c_crossattn' in c1: - s1 = c1['c_crossattn'].shape - s2 = c2['c_crossattn'].shape - if s1 != s2: - if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen - return False - - mult_min = lcm(s1[1], s2[1]) - diff = mult_min // min(s1[1], s2[1]) - if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much - return False - if 'c_concat' in c1: - if c1['c_concat'].shape != c2['c_concat'].shape: - return False - if 'c_adm' in c1: - if c1['c_adm'].shape != c2['c_adm'].shape: + for k in c1: + if not c1[k].can_concat(c2[k]): return False return True @@ -149,31 +184,19 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod c_concat = [] c_adm = [] 
crossattn_max_len = 0 - for x in c_list: - if 'c_crossattn' in x: - c = x['c_crossattn'] - if crossattn_max_len == 0: - crossattn_max_len = c.shape[1] - else: - crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) - c_crossattn.append(c) - if 'c_concat' in x: - c_concat.append(x['c_concat']) - if 'c_adm' in x: - c_adm.append(x['c_adm']) - out = {} - c_crossattn_out = [] - for c in c_crossattn: - if c.shape[1] < crossattn_max_len: - c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result - c_crossattn_out.append(c) - if len(c_crossattn_out) > 0: - out['c_crossattn'] = torch.cat(c_crossattn_out) - if len(c_concat) > 0: - out['c_concat'] = torch.cat(c_concat) - if len(c_adm) > 0: - out['c_adm'] = torch.cat(c_adm) + temp = {} + for x in c_list: + for k in x: + cur = temp.get(k, []) + cur.append(x[k]) + temp[k] = cur + + out = {} + for k in temp: + conds = temp[k] + out[k] = conds[0].concat(conds[1:]) + return out def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): From 036f88c62166a750ecfc88175d2f6836c5707e3b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 24 Oct 2023 23:31:12 -0400 Subject: [PATCH 027/170] Refactor to make it easier to add custom conds to models. --- comfy/conds.py | 64 ++++++++++++ comfy/model_base.py | 14 ++- comfy/sample.py | 31 +++--- comfy/samplers.py | 234 +++++++++++++++----------------------------- 4 files changed, 170 insertions(+), 173 deletions(-) create mode 100644 comfy/conds.py diff --git a/comfy/conds.py b/comfy/conds.py new file mode 100644 index 000000000..1e3111baf --- /dev/null +++ b/comfy/conds.py @@ -0,0 +1,64 @@ +import enum +import torch +import math +import comfy.utils + + +def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) + return abs(a*b) // math.gcd(a, b) + +class CONDRegular: + def __init__(self, cond): + self.cond = cond + + def _copy_with(self, cond): + return self.__class__(cond) + + def process_cond(self, batch_size, device, **kwargs): + return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size).to(device)) + + def can_concat(self, other): + if self.cond.shape != other.cond.shape: + return False + return True + + def concat(self, others): + conds = [self.cond] + for x in others: + conds.append(x.cond) + return torch.cat(conds) + +class CONDNoiseShape(CONDRegular): + def process_cond(self, batch_size, device, area, **kwargs): + data = self.cond[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] + return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size).to(device)) + + +class CONDCrossAttn(CONDRegular): + def can_concat(self, other): + s1 = self.cond.shape + s2 = other.cond.shape + if s1 != s2: + if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen + return False + + mult_min = lcm(s1[1], s2[1]) + diff = mult_min // min(s1[1], s2[1]) + if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much + return False + return True + + def concat(self, others): + conds = [self.cond] + crossattn_max_len = self.cond.shape[1] + for x in others: + c = x.cond + crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) + conds.append(c) + + out = [] + for c in conds: + if c.shape[1] < crossattn_max_len: + c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result + out.append(c) + return torch.cat(out) diff --git a/comfy/model_base.py b/comfy/model_base.py index cda6765e4..edc246f8c 
100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -4,6 +4,7 @@ from comfy.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugme from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule from comfy.ldm.modules.diffusionmodules.openaimodel import Timestep import comfy.model_management +import comfy.conds import numpy as np from enum import Enum from . import utils @@ -49,7 +50,7 @@ class BaseModel(torch.nn.Module): self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}): + def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}, **kwargs): if c_concat is not None: xc = torch.cat([x] + [c_concat], dim=1) else: @@ -72,7 +73,8 @@ class BaseModel(torch.nn.Module): def encode_adm(self, **kwargs): return None - def cond_concat(self, **kwargs): + def extra_conds(self, **kwargs): + out = {} if self.inpaint_model: concat_keys = ("mask", "masked_image") cond_concat = [] @@ -101,8 +103,12 @@ class BaseModel(torch.nn.Module): cond_concat.append(torch.ones_like(noise)[:,:1]) elif ck == "masked_image": cond_concat.append(blank_inpaint_image_like(noise)) - return cond_concat - return None + data = torch.cat(cond_concat, dim=1) + out['c_concat'] = comfy.conds.CONDNoiseShape(data) + adm = self.encode_adm(**kwargs) + if adm is not None: + out['c_adm'] = comfy.conds.CONDRegular(adm) + return out def load_model_weights(self, sd, unet_prefix=""): to_load = {} diff --git a/comfy/sample.py b/comfy/sample.py index e6a69973d..b3fcd1658 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -1,6 +1,7 @@ import torch import comfy.model_management import comfy.samplers +import comfy.conds import comfy.utils import math import numpy as np @@ -33,22 +34,24 @@ def prepare_mask(noise_mask, shape, device): noise_mask = noise_mask.to(device) return noise_mask -def broadcast_cond(cond, batch, device): - """broadcasts conditioning to the batch size""" - copy = [] - for p in cond: - t = comfy.utils.repeat_to_batch_size(p[0], batch) - t = t.to(device) - copy += [[t] + p[1:]] - return copy - def get_models_from_cond(cond, model_type): models = [] for c in cond: - if model_type in c[1]: - models += [c[1][model_type]] + if model_type in c: + models += [c[model_type]] return models +def convert_cond(cond): + out = [] + for c in cond: + temp = c[1].copy() + model_conds = temp.get("model_conds", {}) + if c[0] is not None: + model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) + temp["model_conds"] = model_conds + out.append(temp) + return out + def get_additional_models(positive, negative, dtype): """loads additional models in positive and negative conditioning""" control_nets = set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")) @@ -72,6 +75,8 @@ def cleanup_additional_models(models): def prepare_sampling(model, noise_shape, positive, negative, noise_mask): device = model.load_device + positive = convert_cond(positive) + negative = convert_cond(negative) if noise_mask is not None: noise_mask = prepare_mask(noise_mask, noise_shape, device) @@ -81,9 +86,7 @@ def prepare_sampling(model, noise_shape, positive, negative, noise_mask): comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise_shape[0] * 
noise_shape[2] * noise_shape[3]) + inference_memory) real_model = model.model - positive_copy = broadcast_cond(positive, noise_shape[0], device) - negative_copy = broadcast_cond(negative, noise_shape[0], device) - return real_model, positive_copy, negative_copy, noise_mask, models + return real_model, positive, negative, noise_mask, models def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None): diff --git a/comfy/samplers.py b/comfy/samplers.py index f88b790d8..f930aa39b 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -2,96 +2,44 @@ from .k_diffusion import sampling as k_diffusion_sampling from .k_diffusion import external as k_diffusion_external from .extra_samplers import uni_pc import torch +import enum from comfy import model_management from .ldm.models.diffusion.ddim import DDIMSampler from .ldm.modules.diffusionmodules.util import make_ddim_timesteps import math from comfy import model_base import comfy.utils - - -def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) - return abs(a*b) // math.gcd(a, b) - -class CONDRegular: - def __init__(self, cond): - self.cond = cond - - def can_concat(self, other): - if self.cond.shape != other.cond.shape: - return False - return True - - def concat(self, others): - conds = [self.cond] - for x in others: - conds.append(x.cond) - return torch.cat(conds) - -class CONDCrossAttn: - def __init__(self, cond): - self.cond = cond - - def can_concat(self, other): - s1 = self.cond.shape - s2 = other.cond.shape - if s1 != s2: - if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen - return False - - mult_min = lcm(s1[1], s2[1]) - diff = mult_min // min(s1[1], s2[1]) - if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much - return False - return True - - def concat(self, others): - conds = [self.cond] - crossattn_max_len = self.cond.shape[1] - for x in others: - c = x.cond - crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) - conds.append(c) - - out = [] - for c in conds: - if c.shape[1] < crossattn_max_len: - c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result - out.append(c) - return torch.cat(out) +import comfy.conds #The main sampling function shared by all the samplers #Returns predicted noise def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): - def get_area_and_mult(cond, x_in, timestep_in): + def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) strength = 1.0 - if 'timestep_start' in cond[1]: - timestep_start = cond[1]['timestep_start'] + + if 'timestep_start' in conds: + timestep_start = conds['timestep_start'] if timestep_in[0] > timestep_start: return None - if 'timestep_end' in cond[1]: - timestep_end = cond[1]['timestep_end'] + if 'timestep_end' in conds: + timestep_end = conds['timestep_end'] if timestep_in[0] < timestep_end: return None - if 'area' in cond[1]: - area = cond[1]['area'] - if 'strength' in cond[1]: - strength = cond[1]['strength'] - - adm_cond = None - if 'adm_encoded' in cond[1]: - adm_cond = cond[1]['adm_encoded'] + if 'area' in conds: + area = conds['area'] + if 'strength' in conds: + strength = conds['strength'] input_x = x_in[:,:,area[2]:area[0] + 
area[2],area[3]:area[1] + area[3]] - if 'mask' in cond[1]: + if 'mask' in conds: # Scale the mask to the size of the input # The mask should have been resized as we began the sampling process mask_strength = 1.0 - if "mask_strength" in cond[1]: - mask_strength = cond[1]["mask_strength"] - mask = cond[1]['mask'] + if "mask_strength" in conds: + mask_strength = conds["mask_strength"] + mask = conds['mask'] assert(mask.shape[1] == x_in.shape[2]) assert(mask.shape[2] == x_in.shape[3]) mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength @@ -100,7 +48,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod mask = torch.ones_like(input_x) mult = mask * strength - if 'mask' not in cond[1]: + if 'mask' not in conds: rr = 8 if area[2] != 0: for t in range(rr): @@ -116,27 +64,17 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) conditionning = {} - conditionning['c_crossattn'] = CONDCrossAttn(cond[0]) - - if 'concat' in cond[1]: - cond_concat_in = cond[1]['concat'] - if cond_concat_in is not None and len(cond_concat_in) > 0: - cropped = [] - for x in cond_concat_in: - cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - cropped.append(cr) - conditionning['c_concat'] = CONDRegular(torch.cat(cropped, dim=1)) - - if adm_cond is not None: - conditionning['c_adm'] = CONDRegular(adm_cond) + model_conds = conds["model_conds"] + for c in model_conds: + conditionning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) control = None - if 'control' in cond[1]: - control = cond[1]['control'] + if 'control' in conds: + control = conds['control'] patches = None - if 'gligen' in cond[1]: - gligen = cond[1]['gligen'] + if 'gligen' in conds: + gligen = conds['gligen'] patches = {} gligen_type = gligen[0] gligen_model = gligen[1] @@ -412,19 +350,19 @@ def resolve_areas_and_cond_masks(conditions, h, w, device): # While we're doing this, we can also resolve the mask device and scaling for performance reasons for i in range(len(conditions)): c = conditions[i] - if 'area' in c[1]: - area = c[1]['area'] + if 'area' in c: + area = c['area'] if area[0] == "percentage": - modified = c[1].copy() + modified = c.copy() area = (max(1, round(area[1] * h)), max(1, round(area[2] * w)), round(area[3] * h), round(area[4] * w)) modified['area'] = area - c = [c[0], modified] + c = modified conditions[i] = c - if 'mask' in c[1]: - mask = c[1]['mask'] + if 'mask' in c: + mask = c['mask'] mask = mask.to(device=device) - modified = c[1].copy() + modified = c.copy() if len(mask.shape) == 2: mask = mask.unsqueeze(0) if mask.shape[1] != h or mask.shape[2] != w: @@ -445,37 +383,39 @@ def resolve_areas_and_cond_masks(conditions, h, w, device): modified['area'] = area modified['mask'] = mask - conditions[i] = [c[0], modified] + conditions[i] = modified def create_cond_with_same_area_if_none(conds, c): - if 'area' not in c[1]: + if 'area' not in c: return - c_area = c[1]['area'] + c_area = c['area'] smallest = None for x in conds: - if 'area' in x[1]: - a = x[1]['area'] + if 'area' in x: + a = x['area'] if c_area[2] >= a[2] and c_area[3] >= a[3]: if a[0] + a[2] >= c_area[0] + c_area[2]: if a[1] + a[3] >= c_area[1] + c_area[3]: if smallest is None: smallest = x - elif 'area' not in smallest[1]: + elif 'area' not in smallest: smallest = x else: - if smallest[1]['area'][0] * smallest[1]['area'][1] > a[0] * a[1]: + if smallest['area'][0] * 
smallest['area'][1] > a[0] * a[1]: smallest = x else: if smallest is None: smallest = x if smallest is None: return - if 'area' in smallest[1]: - if smallest[1]['area'] == c_area: + if 'area' in smallest: + if smallest['area'] == c_area: return - n = c[1].copy() - conds += [[smallest[0], n]] + + out = c.copy() + out['model_conds'] = smallest['model_conds'].copy() #TODO: which fields should be copied? + conds += [out] def calculate_start_end_timesteps(model, conds): for t in range(len(conds)): @@ -483,18 +423,18 @@ def calculate_start_end_timesteps(model, conds): timestep_start = None timestep_end = None - if 'start_percent' in x[1]: - timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['start_percent'] * 999.0))) - if 'end_percent' in x[1]: - timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['end_percent'] * 999.0))) + if 'start_percent' in x: + timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['start_percent'] * 999.0))) + if 'end_percent' in x: + timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['end_percent'] * 999.0))) if (timestep_start is not None) or (timestep_end is not None): - n = x[1].copy() + n = x.copy() if (timestep_start is not None): n['timestep_start'] = timestep_start if (timestep_end is not None): n['timestep_end'] = timestep_end - conds[t] = [x[0], n] + conds[t] = n def pre_run_control(model, conds): for t in range(len(conds)): @@ -503,8 +443,8 @@ def pre_run_control(model, conds): timestep_start = None timestep_end = None percent_to_timestep_function = lambda a: model.sigma_to_t(model.t_to_sigma(torch.tensor(a) * 999.0)) - if 'control' in x[1]: - x[1]['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function) + if 'control' in x: + x['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function) def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): cond_cnets = [] @@ -513,16 +453,16 @@ def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): uncond_other = [] for t in range(len(conds)): x = conds[t] - if 'area' not in x[1]: - if name in x[1] and x[1][name] is not None: - cond_cnets.append(x[1][name]) + if 'area' not in x: + if name in x and x[name] is not None: + cond_cnets.append(x[name]) else: cond_other.append((x, t)) for t in range(len(uncond)): x = uncond[t] - if 'area' not in x[1]: - if name in x[1] and x[1][name] is not None: - uncond_cnets.append(x[1][name]) + if 'area' not in x: + if name in x and x[name] is not None: + uncond_cnets.append(x[name]) else: uncond_other.append((x, t)) @@ -532,47 +472,35 @@ def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): for x in range(len(cond_cnets)): temp = uncond_other[x % len(uncond_other)] o = temp[0] - if name in o[1] and o[1][name] is not None: - n = o[1].copy() + if name in o and o[name] is not None: + n = o.copy() n[name] = uncond_fill_func(cond_cnets, x) - uncond += [[o[0], n]] + uncond += [n] else: - n = o[1].copy() + n = o.copy() n[name] = uncond_fill_func(cond_cnets, x) - uncond[temp[1]] = [o[0], n] + uncond[temp[1]] = n -def encode_adm(model, conds, batch_size, width, height, device, prompt_type): +def encode_model_conds(model_function, conds, noise, device, prompt_type, **kwargs): for t in range(len(conds)): x = conds[t] - adm_out = None - if 'adm' in x[1]: - adm_out = x[1]["adm"] - else: - params = x[1].copy() - params["width"] = params.get("width", width * 8) - params["height"] = params.get("height", height * 8) - params["prompt_type"] = 
params.get("prompt_type", prompt_type) - adm_out = model.encode_adm(device=device, **params) - - if adm_out is not None: - x[1] = x[1].copy() - x[1]["adm_encoded"] = comfy.utils.repeat_to_batch_size(adm_out, batch_size).to(device) - - return conds - -def encode_cond(model_function, key, conds, device, **kwargs): - for t in range(len(conds)): - x = conds[t] - params = x[1].copy() + params = x.copy() params["device"] = device + params["noise"] = noise + params["width"] = params.get("width", noise.shape[3] * 8) + params["height"] = params.get("height", noise.shape[2] * 8) + params["prompt_type"] = params.get("prompt_type", prompt_type) for k in kwargs: if k not in params: params[k] = kwargs[k] out = model_function(**params) - if out is not None: - x[1] = x[1].copy() - x[1][key] = out + x = x.copy() + model_conds = x['model_conds'].copy() + for k in out: + model_conds[k] = out[k] + x['model_conds'] = model_conds + conds[t] = x return conds class Sampler: @@ -690,19 +618,15 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model pre_run_control(model_wrap, negative + positive) - apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) + apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) if latent_image is not None: latent_image = model.process_latent_in(latent_image) - if model.is_adm(): - positive = encode_adm(model, positive, noise.shape[0], noise.shape[3], noise.shape[2], device, "positive") - negative = encode_adm(model, negative, noise.shape[0], noise.shape[3], noise.shape[2], device, "negative") - - if hasattr(model, 'cond_concat'): - positive = encode_cond(model.cond_concat, "concat", positive, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) - negative = encode_cond(model.cond_concat, "concat", negative, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + if hasattr(model, 'extra_conds'): + positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask) + negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask) extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} From d1d2fea806c07b1519634ec6dbc8c7f60dee8f4e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 25 Oct 2023 00:07:53 -0400 Subject: [PATCH 028/170] Pass extra conds directly to unet. 
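Together with the samplers.py changes above (conds now carry a 'model_conds' dict of wrapper objects whose process_cond() prepares each tensor), apply_model no longer needs a named parameter per cond type: it casts any leftover keyword arguments to the model dtype and forwards them into the unet. The sketch below illustrates the pattern; the wrapper's process_cond body is an assumption here (comfy/conds.py is not part of these hunks), modeled on the repeat-to-batch-and-move-to-device logic the removed encode_adm used.

    import torch

    class CONDRegularSketch:
        # Hypothetical stand-in for comfy.conds.CONDRegular; the real
        # class lives in comfy/conds.py, which these hunks do not show.
        def __init__(self, cond):
            self.cond = cond

        def process_cond(self, batch_size, device, **kwargs):
            # Assumed behaviour, mirroring the removed encode_adm code:
            # repeat along the batch dimension, then move to the device.
            c = self.cond
            if c.shape[0] < batch_size:
                reps = -(-batch_size // c.shape[0])  # ceil division
                c = c.repeat((reps,) + (1,) * (c.ndim - 1))[:batch_size]
            return CONDRegularSketch(c.to(device))

    def apply_model_sketch(diffusion_model, xc, t, context, control=None,
                           transformer_options={}, **kwargs):
        # The pattern this patch introduces: every remaining keyword
        # ('y' for ADM now, any new cond type later) is cast to the model
        # dtype and passed straight through, so adding a cond type no
        # longer requires touching apply_model's signature.
        dtype = next(diffusion_model.parameters()).dtype
        extra_conds = {k: v.to(dtype) for k, v in kwargs.items()}
        return diffusion_model(xc, t, context=context, control=control,
                               transformer_options=transformer_options,
                               **extra_conds).float()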
--- comfy/model_base.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index edc246f8c..ea3ea61f2 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -50,7 +50,7 @@ class BaseModel(torch.nn.Module): self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}, **kwargs): + def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs): if c_concat is not None: xc = torch.cat([x] + [c_concat], dim=1) else: @@ -60,9 +60,10 @@ class BaseModel(torch.nn.Module): xc = xc.to(dtype) t = t.to(dtype) context = context.to(dtype) - if c_adm is not None: - c_adm = c_adm.to(dtype) - return self.diffusion_model(xc, t, context=context, y=c_adm, control=control, transformer_options=transformer_options).float() + extra_conds = {} + for o in kwargs: + extra_conds[o] = kwargs[o].to(dtype) + return self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() def get_dtype(self): return self.diffusion_model.dtype @@ -107,7 +108,7 @@ class BaseModel(torch.nn.Module): out['c_concat'] = comfy.conds.CONDNoiseShape(data) adm = self.encode_adm(**kwargs) if adm is not None: - out['c_adm'] = comfy.conds.CONDRegular(adm) + out['y'] = comfy.conds.CONDRegular(adm) return out def load_model_weights(self, sd, unet_prefix=""): From 3783cb8bfd4bc0a688a565319257931f4737a958 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Wed, 25 Oct 2023 08:24:32 -0500 Subject: [PATCH 029/170] change 'c_adm' to 'y' in ControlNet.get_control --- comfy/controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index f1355e64e..2a88dd019 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -156,7 +156,7 @@ class ControlNet(ControlBase): context = cond['c_crossattn'] - y = cond.get('c_adm', None) + y = cond.get('y', None) if y is not None: y = y.to(self.control_model.dtype) control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=t, context=context.to(self.control_model.dtype), y=y) From 7fbb217d3a46fc117dd78b90191e528139fb851a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 25 Oct 2023 16:08:30 -0400 Subject: [PATCH 030/170] Fix uni_pc returning noisy image when steps <= 3 --- comfy/extra_samplers/uni_pc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 58e030d04..9d5f0c60b 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -881,7 +881,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex model_kwargs=extra_args, ) - order = min(3, len(timesteps) - 1) + order = min(3, len(timesteps) - 2) uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant) x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback, disable_pbar=disable) x /= ns.marginal_alpha(timesteps[-1]) From a373367b0cf37e9c67b30d21c207417dedfffd4f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 25 Oct 2023 
20:17:28 -0400 Subject: [PATCH 031/170] Fix some OOM issues with split and sub quad attention. --- comfy/ldm/modules/attention.py | 9 +++++++-- comfy/ldm/modules/sub_quadratic_attention.py | 3 ++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index dcf467489..4f10bbc35 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -222,9 +222,14 @@ def attention_split(q, k, v, heads, mask=None): mem_free_total = model_management.get_free_memory(q.device) + if _ATTN_PRECISION =="fp32": + element_size = 4 + else: + element_size = q.element_size() + gb = 1024 ** 3 - tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() - modifier = 3 if q.element_size() == 2 else 2.5 + tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size + modifier = 3 if element_size == 2 else 2.5 mem_required = tensor_size * modifier steps = 1 diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py index 4d42059b5..8e8e8054d 100644 --- a/comfy/ldm/modules/sub_quadratic_attention.py +++ b/comfy/ldm/modules/sub_quadratic_attention.py @@ -83,7 +83,8 @@ def _summarize_chunk( ) max_score, _ = torch.max(attn_weights, -1, keepdim=True) max_score = max_score.detach() - torch.exp(attn_weights - max_score, out=attn_weights) + attn_weights -= max_score + torch.exp(attn_weights, out=attn_weights) exp_weights = attn_weights.to(value.dtype) exp_values = torch.bmm(exp_weights, value) max_score = max_score.squeeze(-1) From 723847f6b3d5da21e5d712bc0139fb7197ba60a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 26 Oct 2023 01:53:01 -0400 Subject: [PATCH 032/170] Faster clip image processing. --- comfy/clip_vision.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index e085186ef..9e2e03d72 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -1,5 +1,5 @@ -from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor, modeling_utils -from .utils import load_torch_file, transformers_convert +from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, modeling_utils +from .utils import load_torch_file, transformers_convert, common_upscale import os import torch import contextlib @@ -7,6 +7,18 @@ import contextlib import comfy.ops import comfy.model_patcher import comfy.model_management +import comfy.utils + +def clip_preprocess(image, size=224): + mean = torch.tensor([ 0.48145466,0.4578275,0.40821073], device=image.device, dtype=image.dtype) + std = torch.tensor([0.26862954,0.26130258,0.27577711], device=image.device, dtype=image.dtype) + scale = (size / min(image.shape[1], image.shape[2])) + image = torch.nn.functional.interpolate(image.movedim(-1, 1), size=(round(scale * image.shape[1]), round(scale * image.shape[2])), mode="bicubic", antialias=True) + h = (image.shape[2] - size)//2 + w = (image.shape[3] - size)//2 + image = image[:,:,h:h+size,w:w+size] + image = torch.clip((255. 
* image), 0, 255).round() / 255.0 + return (image - mean.view([3,1,1])) / std.view([3,1,1]) class ClipVisionModel(): def __init__(self, json_config): @@ -23,25 +35,12 @@ class ClipVisionModel(): self.model.to(self.dtype) self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) - self.processor = CLIPImageProcessor(crop_size=224, - do_center_crop=True, - do_convert_rgb=True, - do_normalize=True, - do_resize=True, - image_mean=[ 0.48145466,0.4578275,0.40821073], - image_std=[0.26862954,0.26130258,0.27577711], - resample=3, #bicubic - size=224) - def load_sd(self, sd): return self.model.load_state_dict(sd, strict=False) def encode_image(self, image): - img = torch.clip((255. * image), 0, 255).round().int() - img = list(map(lambda a: a, img)) - inputs = self.processor(images=img, return_tensors="pt") comfy.model_management.load_model_gpu(self.patcher) - pixel_values = inputs['pixel_values'].to(self.load_device) + pixel_values = clip_preprocess(image.to(self.load_device)) if self.dtype != torch.float32: precision_scope = torch.autocast From 40963b5a16f717a636c98dae0055224938852c6a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 26 Oct 2023 19:52:41 -0400 Subject: [PATCH 033/170] Apply primitive nodes to graph before serializing workflow. --- web/scripts/app.js | 14 ++++++++++---- web/scripts/ui.js | 28 +++++++++++++++------------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index fca5b5bd3..583310a27 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1586,6 +1586,16 @@ export class ComfyApp { * @returns The workflow and node links */ async graphToPrompt() { + for (const node of this.graph.computeExecutionOrder(false)) { + if (node.isVirtualNode) { + // Don't serialize frontend only nodes but let them make changes + if (node.applyToGraph) { + node.applyToGraph(); + } + continue; + } + } + const workflow = this.graph.serialize(); const output = {}; // Process nodes in order of execution @@ -1593,10 +1603,6 @@ export class ComfyApp { const n = workflow.nodes.find((n) => n.id === node.id); if (node.isVirtualNode) { - // Don't serialize frontend only nodes but let them make changes - if (node.applyToGraph) { - node.applyToGraph(workflow); - } continue; } diff --git a/web/scripts/ui.js b/web/scripts/ui.js index c3b3fbda1..6f01aa5b2 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -719,20 +719,22 @@ export class ComfyUI { filename += ".json"; } } - const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string - const blob = new Blob([json], {type: "application/json"}); - const url = URL.createObjectURL(blob); - const a = $el("a", { - href: url, - download: filename, - style: {display: "none"}, - parent: document.body, + app.graphToPrompt().then(p=>{ + const json = JSON.stringify(p.workflow, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: filename, + style: {display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); }); - a.click(); - setTimeout(function () { - a.remove(); - window.URL.revokeObjectURL(url); - }, 0); }, }), $el("button", { From 434ce25ec00719ec67372482af2f0e6e517d548a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 02:42:14 -0400 Subject: [PATCH 
034/170] Restrict loading embeddings from embedding folders. --- comfy/sd1_clip.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 9978b6c35..ffe2bd3bd 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -278,7 +278,13 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No valid_file = None for embed_dir in embedding_directory: - embed_path = os.path.join(embed_dir, embedding_name) + embed_path = os.path.abspath(os.path.join(embed_dir, embedding_name)) + embed_dir = os.path.abspath(embed_dir) + try: + if os.path.commonpath((embed_dir, embed_path)) != embed_dir: + continue + except: + continue if not os.path.isfile(embed_path): extensions = ['.safetensors', '.pt', '.bin'] for x in extensions: From 6ec3f12c6e2e1d214c41f5713308818541da52a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 14:15:45 -0400 Subject: [PATCH 035/170] Support SSD1B model and make it easier to support asymmetric unets. --- comfy/cldm/cldm.py | 47 ++++---- .../modules/diffusionmodules/openaimodel.py | 42 +++---- comfy/model_detection.py | 112 ++++++++++++++---- comfy/sd.py | 2 +- comfy/supported_models.py | 16 ++- comfy/utils.py | 30 ++--- 6 files changed, 153 insertions(+), 96 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index f982d648c..9a63202ab 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -27,7 +27,6 @@ class ControlNet(nn.Module): model_channels, hint_channels, num_res_blocks, - attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, @@ -52,6 +51,7 @@ class ControlNet(nn.Module): use_linear_in_transformer=False, adm_in_channels=None, transformer_depth_middle=None, + transformer_depth_output=None, device=None, operations=comfy.ops, ): @@ -79,10 +79,7 @@ class ControlNet(nn.Module): self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels - if isinstance(transformer_depth, int): - transformer_depth = len(channel_mult) * [transformer_depth] - if transformer_depth_middle is None: - transformer_depth_middle = transformer_depth[-1] + if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: @@ -90,18 +87,16 @@ class ControlNet(nn.Module): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") - self.attention_resolutions = attention_resolutions + transformer_depth = transformer_depth[:] + self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample @@ -180,11 +175,14 @@ class ControlNet(nn.Module): dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, - operations=operations + dtype=self.dtype, + device=device, + operations=operations, ) ] ch = mult * model_channels - if ds in attention_resolutions: + num_transformers = transformer_depth.pop(0) + if num_transformers > 0: if num_head_channels == -1: dim_head = ch // num_heads else: @@ -201,9 +199,9 @@ class ControlNet(nn.Module): if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, + ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, operations=operations + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) @@ -223,11 +221,13 @@ class ControlNet(nn.Module): use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, + dtype=self.dtype, + device=device, operations=operations ) if resblock_updown else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch, operations=operations + ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device, operations=operations ) ) ) @@ -245,7 +245,7 @@ class ControlNet(nn.Module): if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( + mid_block = [ ResBlock( ch, time_embed_dim, @@ -253,12 +253,15 @@ class ControlNet(nn.Module): dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, + dtype=self.dtype, + device=device, operations=operations - ), - SpatialTransformer( # always uses a self-attn + )] + if transformer_depth_middle >= 0: + mid_block += [SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, operations=operations + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ), ResBlock( ch, @@ -267,9 +270,11 @@ class ControlNet(nn.Module): dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, + dtype=self.dtype, + device=device, operations=operations - ), - ) + )] + self.middle_block = TimestepEmbedSequential(*mid_block) self.middle_block_out = self.make_zero_conv(ch, operations=operations) self._feature_size += ch diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index bf58a4045..7dfdfc0a2 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -259,10 +259,6 @@ class UNetModel(nn.Module): :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. 
:param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and @@ -289,7 +285,6 @@ class UNetModel(nn.Module): model_channels, out_channels, num_res_blocks, - attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, @@ -314,6 +309,7 @@ class UNetModel(nn.Module): use_linear_in_transformer=False, adm_in_channels=None, transformer_depth_middle=None, + transformer_depth_output=None, device=None, operations=comfy.ops, ): @@ -341,10 +337,7 @@ class UNetModel(nn.Module): self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels - if isinstance(transformer_depth, int): - transformer_depth = len(channel_mult) * [transformer_depth] - if transformer_depth_middle is None: - transformer_depth_middle = transformer_depth[-1] + if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: @@ -352,18 +345,16 @@ class UNetModel(nn.Module): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) - assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") - self.attention_resolutions = attention_resolutions + transformer_depth = transformer_depth[:] + transformer_depth_output = transformer_depth_output[:] + self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample @@ -428,7 +419,8 @@ class UNetModel(nn.Module): ) ] ch = mult * model_channels - if ds in attention_resolutions: + num_transformers = transformer_depth.pop(0) + if num_transformers > 0: if num_head_channels == -1: dim_head = ch // num_heads else: @@ -444,7 +436,7 @@ class UNetModel(nn.Module): if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append(SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, + ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) @@ -488,7 +480,7 @@ class UNetModel(nn.Module): if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( + mid_block = [ ResBlock( ch, time_embed_dim, @@ -499,8 +491,9 @@ class UNetModel(nn.Module): dtype=self.dtype, device=device, operations=operations - ), - SpatialTransformer( # always uses a self-attn + )] + if transformer_depth_middle >= 0: + mid_block += [SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations @@ -515,8 +508,8 @@ class UNetModel(nn.Module): dtype=self.dtype, device=device, operations=operations - ), - ) + )] + self.middle_block = TimestepEmbedSequential(*mid_block) self._feature_size += ch self.output_blocks = nn.ModuleList([]) @@ -538,7 +531,8 @@ class UNetModel(nn.Module): ) ] ch = model_channels * mult - if ds in attention_resolutions: + num_transformers = transformer_depth_output.pop() + if num_transformers > 0: if num_head_channels == -1: dim_head = ch // num_heads else: @@ -555,7 +549,7 @@ class UNetModel(nn.Module): if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, + ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 0ff2e7fb5..4f4e0b3b7 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -14,6 +14,19 @@ def count_blocks(state_dict_keys, prefix_string): count += 1 return count +def calculate_transformer_depth(prefix, state_dict_keys, state_dict): + context_dim = None + use_linear_in_transformer = False + + transformer_prefix = prefix + "1.transformer_blocks." 
+ transformer_keys = sorted(list(filter(lambda a: a.startswith(transformer_prefix), state_dict_keys))) + if len(transformer_keys) > 0: + last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}') + context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1] + use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2 + return last_transformer_depth, context_dim, use_linear_in_transformer + return None + def detect_unet_config(state_dict, key_prefix, dtype): state_dict_keys = list(state_dict.keys()) @@ -40,6 +53,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): channel_mult = [] attention_resolutions = [] transformer_depth = [] + transformer_depth_output = [] context_dim = None use_linear_in_transformer = False @@ -48,60 +62,67 @@ def detect_unet_config(state_dict, key_prefix, dtype): count = 0 last_res_blocks = 0 - last_transformer_depth = 0 last_channel_mult = 0 - while True: + input_block_count = count_blocks(state_dict_keys, '{}input_blocks'.format(key_prefix) + '.{}.') + for count in range(input_block_count): prefix = '{}input_blocks.{}.'.format(key_prefix, count) + prefix_output = '{}output_blocks.{}.'.format(key_prefix, input_block_count - count - 1) + block_keys = sorted(list(filter(lambda a: a.startswith(prefix), state_dict_keys))) if len(block_keys) == 0: break + block_keys_output = sorted(list(filter(lambda a: a.startswith(prefix_output), state_dict_keys))) + if "{}0.op.weight".format(prefix) in block_keys: #new layer - if last_transformer_depth > 0: - attention_resolutions.append(current_res) - transformer_depth.append(last_transformer_depth) num_res_blocks.append(last_res_blocks) channel_mult.append(last_channel_mult) current_res *= 2 last_res_blocks = 0 - last_transformer_depth = 0 last_channel_mult = 0 + out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict) + if out is not None: + transformer_depth_output.append(out[0]) + else: + transformer_depth_output.append(0) else: res_block_prefix = "{}0.in_layers.0.weight".format(prefix) if res_block_prefix in block_keys: last_res_blocks += 1 last_channel_mult = state_dict["{}0.out_layers.3.weight".format(prefix)].shape[0] // model_channels - transformer_prefix = prefix + "1.transformer_blocks." 
- transformer_keys = sorted(list(filter(lambda a: a.startswith(transformer_prefix), state_dict_keys))) - if len(transformer_keys) > 0: - last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}') - if context_dim is None: - context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1] - use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2 + out = calculate_transformer_depth(prefix, state_dict_keys, state_dict) + if out is not None: + transformer_depth.append(out[0]) + if context_dim is None: + context_dim = out[1] + use_linear_in_transformer = out[2] + else: + transformer_depth.append(0) + + res_block_prefix = "{}0.in_layers.0.weight".format(prefix_output) + if res_block_prefix in block_keys_output: + out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict) + if out is not None: + transformer_depth_output.append(out[0]) + else: + transformer_depth_output.append(0) - count += 1 - if last_transformer_depth > 0: - attention_resolutions.append(current_res) - transformer_depth.append(last_transformer_depth) num_res_blocks.append(last_res_blocks) channel_mult.append(last_channel_mult) - transformer_depth_middle = count_blocks(state_dict_keys, '{}middle_block.1.transformer_blocks.'.format(key_prefix) + '{}') - - if len(set(num_res_blocks)) == 1: - num_res_blocks = num_res_blocks[0] - - if len(set(transformer_depth)) == 1: - transformer_depth = transformer_depth[0] + if "{}middle_block.1.proj_in.weight".format(key_prefix) in state_dict_keys: + transformer_depth_middle = count_blocks(state_dict_keys, '{}middle_block.1.transformer_blocks.'.format(key_prefix) + '{}') + else: + transformer_depth_middle = -1 unet_config["in_channels"] = in_channels unet_config["model_channels"] = model_channels unet_config["num_res_blocks"] = num_res_blocks - unet_config["attention_resolutions"] = attention_resolutions unet_config["transformer_depth"] = transformer_depth + unet_config["transformer_depth_output"] = transformer_depth_output unet_config["channel_mult"] = channel_mult unet_config["transformer_depth_middle"] = transformer_depth_middle unet_config['use_linear_in_transformer'] = use_linear_in_transformer @@ -124,6 +145,45 @@ def model_config_from_unet(state_dict, unet_key_prefix, dtype, use_base_if_no_ma else: return model_config +def convert_config(unet_config): + new_config = unet_config.copy() + num_res_blocks = new_config.get("num_res_blocks", None) + channel_mult = new_config.get("channel_mult", None) + + if isinstance(num_res_blocks, int): + num_res_blocks = len(channel_mult) * [num_res_blocks] + + if "attention_resolutions" in new_config: + attention_resolutions = new_config.pop("attention_resolutions") + transformer_depth = new_config.get("transformer_depth", None) + transformer_depth_middle = new_config.get("transformer_depth_middle", None) + + if isinstance(transformer_depth, int): + transformer_depth = len(channel_mult) * [transformer_depth] + if transformer_depth_middle is None: + transformer_depth_middle = transformer_depth[-1] + t_in = [] + t_out = [] + s = 1 + for i in range(len(num_res_blocks)): + res = num_res_blocks[i] + d = 0 + if s in attention_resolutions: + d = transformer_depth[i] + + t_in += [d] * res + t_out += [d] * (res + 1) + s *= 2 + transformer_depth = t_in + transformer_depth_output = t_out + new_config["transformer_depth"] = t_in + new_config["transformer_depth_output"] = t_out + new_config["transformer_depth_middle"] = transformer_depth_middle + + 
new_config["num_res_blocks"] = num_res_blocks + return new_config + + def unet_config_from_diffusers_unet(state_dict, dtype): match = {} attention_resolutions = [] @@ -200,7 +260,7 @@ def unet_config_from_diffusers_unet(state_dict, dtype): matches = False break if matches: - return unet_config + return convert_config(unet_config) return None def model_config_from_diffusers_unet(state_dict, dtype): diff --git a/comfy/sd.py b/comfy/sd.py index c364b723c..aea55bbdf 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -360,7 +360,7 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl from . import latent_formats model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor) - model_config.unet_config = unet_config + model_config.unet_config = model_detection.convert_config(unet_config) if config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"): model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index bb8ae2148..820f2861c 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -104,7 +104,7 @@ class SDXLRefiner(supported_models_base.BASE): "use_linear_in_transformer": True, "context_dim": 1280, "adm_in_channels": 2560, - "transformer_depth": [0, 4, 4, 0], + "transformer_depth": [0, 0, 4, 4, 4, 4, 0, 0], } latent_format = latent_formats.SDXL @@ -139,7 +139,7 @@ class SDXL(supported_models_base.BASE): unet_config = { "model_channels": 320, "use_linear_in_transformer": True, - "transformer_depth": [0, 2, 10], + "transformer_depth": [0, 0, 2, 2, 10, 10], "context_dim": 2048, "adm_in_channels": 2816 } @@ -165,6 +165,7 @@ class SDXL(supported_models_base.BASE): replace_prefix["conditioner.embedders.0.transformer.text_model"] = "cond_stage_model.clip_l.transformer.text_model" state_dict = utils.transformers_convert(state_dict, "conditioner.embedders.1.model.", "cond_stage_model.clip_g.transformer.text_model.", 32) keys_to_replace["conditioner.embedders.1.model.text_projection"] = "cond_stage_model.clip_g.text_projection" + keys_to_replace["conditioner.embedders.1.model.text_projection.weight"] = "cond_stage_model.clip_g.text_projection" keys_to_replace["conditioner.embedders.1.model.logit_scale"] = "cond_stage_model.clip_g.logit_scale" state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) @@ -189,5 +190,14 @@ class SDXL(supported_models_base.BASE): def clip_target(self): return supported_models_base.ClipTarget(sdxl_clip.SDXLTokenizer, sdxl_clip.SDXLClipModel) +class SSD1B(SDXL): + unet_config = { + "model_channels": 320, + "use_linear_in_transformer": True, + "transformer_depth": [0, 0, 2, 2, 4, 4], + "context_dim": 2048, + "adm_in_channels": 2816 + } -models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL] + +models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B] diff --git a/comfy/utils.py b/comfy/utils.py index a1807aa1d..6a0c54e80 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -170,25 +170,12 @@ UNET_MAP_BASIC = { def unet_to_diffusers(unet_config): num_res_blocks = unet_config["num_res_blocks"] - attention_resolutions = unet_config["attention_resolutions"] channel_mult = unet_config["channel_mult"] - transformer_depth = unet_config["transformer_depth"] + transformer_depth = unet_config["transformer_depth"][:] + transformer_depth_output = unet_config["transformer_depth_output"][:] num_blocks = len(channel_mult) - if isinstance(num_res_blocks, int): - 
num_res_blocks = [num_res_blocks] * num_blocks - if isinstance(transformer_depth, int): - transformer_depth = [transformer_depth] * num_blocks - transformers_per_layer = [] - res = 1 - for i in range(num_blocks): - transformers = 0 - if res in attention_resolutions: - transformers = transformer_depth[i] - transformers_per_layer.append(transformers) - res *= 2 - - transformers_mid = unet_config.get("transformer_depth_middle", transformer_depth[-1]) + transformers_mid = unet_config.get("transformer_depth_middle", None) diffusers_unet_map = {} for x in range(num_blocks): @@ -196,10 +183,11 @@ def unet_to_diffusers(unet_config): for i in range(num_res_blocks[x]): for b in UNET_MAP_RESNET: diffusers_unet_map["down_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "input_blocks.{}.0.{}".format(n, b) - if transformers_per_layer[x] > 0: + num_transformers = transformer_depth.pop(0) + if num_transformers > 0: for b in UNET_MAP_ATTENTIONS: diffusers_unet_map["down_blocks.{}.attentions.{}.{}".format(x, i, b)] = "input_blocks.{}.1.{}".format(n, b) - for t in range(transformers_per_layer[x]): + for t in range(num_transformers): for b in TRANSFORMER_BLOCKS: diffusers_unet_map["down_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "input_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b) n += 1 @@ -218,7 +206,6 @@ def unet_to_diffusers(unet_config): diffusers_unet_map["mid_block.resnets.{}.{}".format(i, UNET_MAP_RESNET[b])] = "middle_block.{}.{}".format(n, b) num_res_blocks = list(reversed(num_res_blocks)) - transformers_per_layer = list(reversed(transformers_per_layer)) for x in range(num_blocks): n = (num_res_blocks[x] + 1) * x l = num_res_blocks[x] + 1 @@ -227,11 +214,12 @@ def unet_to_diffusers(unet_config): for b in UNET_MAP_RESNET: diffusers_unet_map["up_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "output_blocks.{}.0.{}".format(n, b) c += 1 - if transformers_per_layer[x] > 0: + num_transformers = transformer_depth_output.pop() + if num_transformers > 0: c += 1 for b in UNET_MAP_ATTENTIONS: diffusers_unet_map["up_blocks.{}.attentions.{}.{}".format(x, i, b)] = "output_blocks.{}.1.{}".format(n, b) - for t in range(transformers_per_layer[x]): + for t in range(num_transformers): for b in TRANSFORMER_BLOCKS: diffusers_unet_map["up_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "output_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b) if i == l - 1: From e60ca6929a999f53a4eeb62cc80f70b1cd7a0acf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 15:54:04 -0400 Subject: [PATCH 036/170] SD1 and SD2 clip and tokenizer code is now more similar to the SDXL one. 
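The refactor reduces SD2 and the SDXL refiner to thin parameterizations of one generic wrapper: the inner text model is stored under an attribute named after the clip ("clip_l", "clip_h", "clip_g") and every call is delegated via getattr. Because the attribute name now appears in parameter paths, checkpoint and LoRA keys can address the inner model with prefixes like "clip_h." (see the lora.py and supported_models.py hunks below). A stripped-down illustration of the pattern:

    import torch

    class SD1ClipModelSketch(torch.nn.Module):
        # Reduced form of the new SD1ClipModel wrapper, for illustration.
        def __init__(self, clip_name="l", clip_model=None, device="cpu",
                     dtype=None, **kwargs):
            super().__init__()
            self.clip_name = clip_name
            self.clip = "clip_{}".format(self.clip_name)
            setattr(self, self.clip,
                    clip_model(device=device, dtype=dtype, **kwargs))

        def encode_token_weights(self, token_weight_pairs):
            # Tokenizer wrappers emit {"l": ...} / {"h": ...} / {"g": ...},
            # so the model wrapper picks its own entry before delegating.
            pairs = token_weight_pairs[self.clip_name]
            return getattr(self, self.clip).encode_token_weights(pairs)

        def load_sd(self, sd):
            return getattr(self, self.clip).load_sd(sd)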
--- comfy/lora.py | 6 ++++-- comfy/sd1_clip.py | 41 +++++++++++++++++++++++++++++++++++++-- comfy/sd2_clip.py | 12 ++++++++++-- comfy/sdxl_clip.py | 29 +++++++-------------------- comfy/supported_models.py | 11 +++++++++-- 5 files changed, 69 insertions(+), 30 deletions(-) diff --git a/comfy/lora.py b/comfy/lora.py index 3009a1c9e..d4cf94c95 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -141,9 +141,9 @@ def model_lora_keys_clip(model, key_map={}): text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}" clip_l_present = False - for b in range(32): + for b in range(32): #TODO: clean up for c in LORA_CLIP_MAP: - k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) + k = "clip_h.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) if k in sdk: lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c]) key_map[lora_key] = k @@ -154,6 +154,8 @@ def model_lora_keys_clip(model, key_map={}): k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) if k in sdk: + lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c]) + key_map[lora_key] = k lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base key_map[lora_key] = k clip_l_present = True diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index ffe2bd3bd..5368a45df 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -35,7 +35,7 @@ class ClipTokenWeightEncoder: return z_empty.cpu(), first_pooled.cpu() return torch.cat(output, dim=-2).cpu(), first_pooled.cpu() -class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): +class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): """Uses the CLIP transformer encoder for text (from huggingface)""" LAYERS = [ "last", @@ -342,7 +342,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No embed_out = next(iter(values)) return embed_out -class SD1Tokenizer: +class SDTokenizer: def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l'): if tokenizer_path is None: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") @@ -454,3 +454,40 @@ class SD1Tokenizer: def untokenize(self, token_weight_pair): return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair)) + + +class SD1Tokenizer: + def __init__(self, embedding_directory=None, clip_name="l", tokenizer=SDTokenizer): + self.clip_name = clip_name + self.clip = "clip_{}".format(self.clip_name) + setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory)) + + def tokenize_with_weights(self, text:str, return_word_ids=False): + out = {} + out[self.clip_name] = getattr(self, self.clip).tokenize_with_weights(text, return_word_ids) + return out + + def untokenize(self, token_weight_pair): + return getattr(self, self.clip).untokenize(token_weight_pair) + + +class SD1ClipModel(torch.nn.Module): + def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel): + super().__init__() + self.clip_name = clip_name + self.clip = "clip_{}".format(self.clip_name) + setattr(self, self.clip, clip_model(device=device, dtype=dtype)) + + def clip_layer(self, layer_idx): + getattr(self, self.clip).clip_layer(layer_idx) + + def reset_clip_layer(self): + getattr(self, self.clip).reset_clip_layer() + + def encode_token_weights(self, token_weight_pairs): + token_weight_pairs = token_weight_pairs[self.clip_name] + out, pooled = getattr(self, 
self.clip).encode_token_weights(token_weight_pairs) + return out, pooled + + def load_sd(self, sd): + return getattr(self, self.clip).load_sd(sd) diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index 05e50a005..9df868b76 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -2,7 +2,7 @@ from comfy import sd1_clip import torch import os -class SD2ClipModel(sd1_clip.SD1ClipModel): +class SD2ClipHModel(sd1_clip.SDClipModel): def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): if layer == "penultimate": layer="hidden" @@ -12,6 +12,14 @@ class SD2ClipModel(sd1_clip.SD1ClipModel): super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) self.empty_tokens = [[49406] + [49407] + [0] * 75] -class SD2Tokenizer(sd1_clip.SD1Tokenizer): +class SD2ClipHTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None): super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024) + +class SD2Tokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None): + super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer) + +class SD2ClipModel(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None): + super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel) diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index e3ac2ee0b..4c508a0ea 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -2,7 +2,7 @@ from comfy import sd1_clip import torch import os -class SDXLClipG(sd1_clip.SD1ClipModel): +class SDXLClipG(sd1_clip.SDClipModel): def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): if layer == "penultimate": layer="hidden" @@ -16,14 +16,14 @@ class SDXLClipG(sd1_clip.SD1ClipModel): def load_sd(self, sd): return super().load_sd(sd) -class SDXLClipGTokenizer(sd1_clip.SD1Tokenizer): +class SDXLClipGTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None): super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g') -class SDXLTokenizer(sd1_clip.SD1Tokenizer): +class SDXLTokenizer: def __init__(self, embedding_directory=None): - self.clip_l = sd1_clip.SD1Tokenizer(embedding_directory=embedding_directory) + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory) self.clip_g = SDXLClipGTokenizer(embedding_directory=embedding_directory) def tokenize_with_weights(self, text:str, return_word_ids=False): @@ -38,7 +38,7 @@ class SDXLTokenizer(sd1_clip.SD1Tokenizer): class SDXLClipModel(torch.nn.Module): def __init__(self, device="cpu", dtype=None): super().__init__() - self.clip_l = sd1_clip.SD1ClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype) + self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype) self.clip_l.layer_norm_hidden_state = False self.clip_g = SDXLClipG(device=device, dtype=dtype) @@ -63,21 +63,6 @@ class SDXLClipModel(torch.nn.Module): else: return self.clip_l.load_sd(sd) -class SDXLRefinerClipModel(torch.nn.Module): +class SDXLRefinerClipModel(sd1_clip.SD1ClipModel): def __init__(self, device="cpu", dtype=None): - super().__init__() 
- self.clip_g = SDXLClipG(device=device, dtype=dtype) - - def clip_layer(self, layer_idx): - self.clip_g.clip_layer(layer_idx) - - def reset_clip_layer(self): - self.clip_g.reset_clip_layer() - - def encode_token_weights(self, token_weight_pairs): - token_weight_pairs_g = token_weight_pairs["g"] - g_out, g_pooled = self.clip_g.encode_token_weights(token_weight_pairs_g) - return g_out, g_pooled - - def load_sd(self, sd): - return self.clip_g.load_sd(sd) + super().__init__(device=device, dtype=dtype, clip_name="g", clip_model=SDXLClipG) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 820f2861c..fdd4ea4f5 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -38,8 +38,15 @@ class SD15(supported_models_base.BASE): if ids.dtype == torch.float32: state_dict['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round() + replace_prefix = {} + replace_prefix["cond_stage_model."] = "cond_stage_model.clip_l." + state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) return state_dict + def process_clip_state_dict_for_saving(self, state_dict): + replace_prefix = {"clip_l.": "cond_stage_model."} + return utils.state_dict_prefix_replace(state_dict, replace_prefix) + def clip_target(self): return supported_models_base.ClipTarget(sd1_clip.SD1Tokenizer, sd1_clip.SD1ClipModel) @@ -62,12 +69,12 @@ class SD20(supported_models_base.BASE): return model_base.ModelType.EPS def process_clip_state_dict(self, state_dict): - state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24) + state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.clip_h.transformer.text_model.", 24) return state_dict def process_clip_state_dict_for_saving(self, state_dict): replace_prefix = {} - replace_prefix[""] = "cond_stage_model.model." + replace_prefix["clip_h"] = "cond_stage_model.model" state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) state_dict = diffusers_convert.convert_text_enc_state_dict_v20(state_dict) return state_dict From 2a134bfab9788b6a0a70aea3172d8e3fc904b414 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 22:13:55 -0400 Subject: [PATCH 037/170] Fix checkpoint loader with config. 
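The previous commit wrapped the SD1/SD2 text encoders, but the config-based loader still handed the wrapper itself to load_clip_weights, while legacy checkpoint keys describe the inner module. The fix points cond_stage_model at clip_h (SD2) or clip_l (SD1), and lets SD1ClipModel/SD2ClipModel forward extra kwargs to the inner model. A self-contained illustration of why the indirection matters (module names here are hypothetical, not ComfyUI code):

    import torch

    class Inner(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.zeros(1))

    class Wrapper(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.clip_l = Inner()  # parameter path becomes "clip_l.weight"

    w = Wrapper()
    legacy_sd = {"weight": torch.ones(1)}  # pre-wrapper key layout
    result = w.load_state_dict(legacy_sd, strict=False)
    print(result.unexpected_keys)  # ['weight'] -- nothing loads on the wrapper
    w.clip_l.load_state_dict(legacy_sd)  # loads once aimed at the inner module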
--- comfy/sd.py | 6 ++++-- comfy/sd1_clip.py | 4 ++-- comfy/sd2_clip.py | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index aea55bbdf..4a2823c9d 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -388,11 +388,13 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"): clip_target.clip = sd2_clip.SD2ClipModel clip_target.tokenizer = sd2_clip.SD2Tokenizer + clip = CLIP(clip_target, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model.clip_h elif clip_config["target"].endswith("FrozenCLIPEmbedder"): clip_target.clip = sd1_clip.SD1ClipModel clip_target.tokenizer = sd1_clip.SD1Tokenizer - clip = CLIP(clip_target, embedding_directory=embedding_directory) - w.cond_stage_model = clip.cond_stage_model + clip = CLIP(clip_target, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model.clip_l load_clip_weights(w, state_dict) return (comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 5368a45df..fdaa1e6c7 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -472,11 +472,11 @@ class SD1Tokenizer: class SD1ClipModel(torch.nn.Module): - def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel): + def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel, **kwargs): super().__init__() self.clip_name = clip_name self.clip = "clip_{}".format(self.clip_name) - setattr(self, self.clip, clip_model(device=device, dtype=dtype)) + setattr(self, self.clip, clip_model(device=device, dtype=dtype, **kwargs)) def clip_layer(self, layer_idx): getattr(self, self.clip).clip_layer(layer_idx) diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index 9df868b76..ebabf7ccd 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -21,5 +21,5 @@ class SD2Tokenizer(sd1_clip.SD1Tokenizer): super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer) class SD2ClipModel(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", dtype=None): - super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel) + def __init__(self, device="cpu", dtype=None, **kwargs): + super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel, **kwargs) From aac8fc99d6a06e9e3b4c0689c1fff3d379dd0672 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 28 Oct 2023 12:24:50 -0400 Subject: [PATCH 038/170] Cleanup webp import code a bit. 
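The old reader was copy-pasted from the PNG path, so its comments and variable names talked about PNG signatures while actually walking RIFF chunks. For reference, the container layout it parses is simple; here is an illustrative walker in Python (not ComfyUI code; note that RIFF pads chunk payloads to even lengths, a detail a strict reader should account for):

    def iter_webp_chunks(data: bytes):
        # A WEBP file is a RIFF container: 'RIFF' <u32 size> 'WEBP',
        # followed by chunks of <fourcc> <u32 little-endian length> <payload>.
        assert data[0:4] == b"RIFF" and data[8:12] == b"WEBP", "not a WEBP file"
        offset = 12
        while offset + 8 <= len(data):
            fourcc = data[offset:offset + 4].decode("ascii", "replace")
            length = int.from_bytes(data[offset + 4:offset + 8], "little")
            yield fourcc, data[offset + 8:offset + 8 + length]
            offset += 8 + length + (length & 1)  # payloads pad to even sizes

    # Usage sketch: find the EXIF chunk that carries the embedded workflow.
    # with open("image.webp", "rb") as f:
    #     exif = dict(iter_webp_chunks(f.read())).get("EXIF")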
--- web/scripts/pnginfo.js | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index 4dc3a032c..491caed79 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -108,29 +108,25 @@ export function getWebpMetadata(file) { return new Promise((r) => { const reader = new FileReader(); reader.onload = (event) => { - // Get the PNG data as a Uint8Array - const pngData = new Uint8Array(event.target.result); - const dataView = new DataView(pngData.buffer); + const webp = new Uint8Array(event.target.result); + const dataView = new DataView(webp.buffer); - // Check that the PNG signature is present + // Check that the WEBP signature is present if (dataView.getUint32(0) !== 0x52494646 || dataView.getUint32(8) !== 0x57454250) { console.error("Not a valid WEBP file"); r(); return; } - // Start searching for chunks after the PNG signature + // Start searching for chunks after the WEBP signature let offset = 12; let txt_chunks = {}; - // Loop through the chunks in the PNG file - while (offset < pngData.length) { - // Get the length of the chunk - const length = dataView.getUint32(offset + 4, true); - // Get the chunk type - const type = String.fromCharCode(...pngData.slice(offset, offset + 4)); - if (type === "EXIF") { - // Get the keyword - let data = parseExifData(pngData.slice(offset + 8, offset + 8 + length)); + // Loop through the chunks in the WEBP file + while (offset < webp.length) { + const chunk_length = dataView.getUint32(offset + 4, true); + const chunk_type = String.fromCharCode(...webp.slice(offset, offset + 4)); + if (chunk_type === "EXIF") { + let data = parseExifData(webp.slice(offset + 8, offset + 8 + chunk_length)); for (var key in data) { var value = data[key]; let index = value.indexOf(':'); @@ -138,7 +134,7 @@ export function getWebpMetadata(file) { } } - offset += 8 + length; + offset += 8 + chunk_length; } r(txt_chunks); From a12cc0532328b93b6d8d4d5a0ca3000d0b24b72c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 29 Oct 2023 03:55:46 -0400 Subject: [PATCH 039/170] Add --max-upload-size argument, the default is 100MB. --- comfy/cli_args.py | 2 ++ server.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index d86557646..e79b89c0f 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -36,6 +36,8 @@ parser = argparse.ArgumentParser() parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. 
(listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") +parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.") + parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).") diff --git a/server.py b/server.py index 63f337a87..11bd2a0fb 100644 --- a/server.py +++ b/server.py @@ -82,7 +82,8 @@ class PromptServer(): if args.enable_cors_header: middlewares.append(create_cors_middleware(args.enable_cors_header)) - self.app = web.Application(client_max_size=104857600, middlewares=middlewares) + max_upload_size = round(args.max_upload_size * 1024 * 1024) + self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares) self.sockets = dict() self.web_root = os.path.join(os.path.dirname( os.path.realpath(__file__)), "web") From 41b07ff8d7807292b56147e12347ab96972c9406 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Sun, 29 Oct 2023 13:30:23 -0500 Subject: [PATCH 040/170] Fix TAESD preview to only decode first latent, instead of all --- latent_preview.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/latent_preview.py b/latent_preview.py index e1553c85c..6e758a1a9 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -22,7 +22,7 @@ class TAESDPreviewerImpl(LatentPreviewer): self.taesd = taesd def decode_latent_to_preview(self, x0): - x_sample = self.taesd.decoder(x0)[0].detach() + x_sample = self.taesd.decoder(x0[:1])[0].detach() # x_sample = self.taesd.unscale_latents(x_sample).div(4).add(0.5) # returns value in [-2, 2] x_sample = x_sample.sub(0.5).mul(2) From 125b03eeadd2ea3e97984e421e90e48d8dd67dbf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 30 Oct 2023 13:14:11 -0400 Subject: [PATCH 041/170] Fix some OOM issues with split attention. --- comfy/ldm/modules/attention.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 4f10bbc35..9840cc7f5 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -229,7 +229,7 @@ def attention_split(q, k, v, heads, mask=None): gb = 1024 ** 3 tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size - modifier = 3 if element_size == 2 else 2.5 + modifier = 3 mem_required = tensor_size * modifier steps = 1 @@ -257,10 +257,10 @@ def attention_split(q, k, v, heads, mask=None): s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale else: s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale - first_op_done = True s2 = s1.softmax(dim=-1).to(v.dtype) del s1 + first_op_done = True r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) del s2 From c837a173fab41b7132a72ab01b256b714bd6adb2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 30 Oct 2023 15:29:45 -0400 Subject: [PATCH 042/170] Fix some memory issues in sub quad attention. 
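The replaced heuristic tried to derive kv chunk sizes from a memory threshold and could mis-size them. The new rule is simpler: walk query chunk sizes from large to small and take the first one where the full key length fits into free memory with a 4x margin, keeping the keys unchunked; if none fits, fall back to 512-token query chunks and let the kernel pick the kv chunking. Restated as a standalone function (a sketch of the logic in this hunk):

    def pick_sub_quad_chunks(mem_free_total, batch_x_heads, bytes_per_token,
                             k_tokens):
        kv_chunk_size = None
        query_chunk_size = None
        for x in (4096, 2048, 1024, 512, 256):
            # Key tokens affordable for a (batch*heads, x, k) slice while
            # reserving roughly 3/4 of free memory for everything else.
            count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
            if count >= k_tokens:
                kv_chunk_size = k_tokens   # keys stay unchunked
                query_chunk_size = x
                break
        if query_chunk_size is None:
            query_chunk_size = 512         # conservative fallback
        return query_chunk_size, kv_chunk_size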
--- comfy/ldm/modules/attention.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 9840cc7f5..016795a59 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -160,32 +160,19 @@ def attention_sub_quad(query, key, value, heads, mask=None): mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True) - chunk_threshold_bytes = mem_free_torch * 0.5 #Using only this seems to work better on AMD - kv_chunk_size_min = None + kv_chunk_size = None + query_chunk_size = None - #not sure at all about the math here - #TODO: tweak this - if mem_free_total > 8192 * 1024 * 1024 * 1.3: - query_chunk_size_x = 1024 * 4 - elif mem_free_total > 4096 * 1024 * 1024 * 1.3: - query_chunk_size_x = 1024 * 2 - else: - query_chunk_size_x = 1024 - kv_chunk_size_min_x = None - kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024 - if kv_chunk_size_x < 1024: - kv_chunk_size_x = None + for x in [4096, 2048, 1024, 512, 256]: + count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0) + if count >= k_tokens: + kv_chunk_size = k_tokens + query_chunk_size = x + break - if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: - # the big matmul fits into our memory limit; do everything in 1 chunk, - # i.e. send it down the unchunked fast-path - query_chunk_size = q_tokens - kv_chunk_size = k_tokens - else: - query_chunk_size = query_chunk_size_x - kv_chunk_size = kv_chunk_size_x - kv_chunk_size_min = kv_chunk_size_min_x + if query_chunk_size is None: + query_chunk_size = 512 hidden_states = efficient_dot_product_attention( query, From 23c5d17837f788df77cfa80a0453d7cdddfe0fe8 Mon Sep 17 00:00:00 2001 From: tsone Date: Tue, 31 Oct 2023 20:54:33 +0100 Subject: [PATCH 043/170] Added Bayer dithering to Quantize node. 
--- comfy_extras/nodes_post_processing.py | 46 +++++++++++++++++++++------ 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 3f651e594..324cfe105 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -126,7 +126,7 @@ class Quantize: "max": 256, "step": 1 }), - "dither": (["none", "floyd-steinberg"],), + "dither": (["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"],), }, } @@ -135,19 +135,47 @@ class Quantize: CATEGORY = "image/postprocessing" - def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"): + def bayer(im, pal_im, order): + def normalized_bayer_matrix(n): + if n == 0: + return np.zeros((1,1), "float32") + else: + q = 4 ** n + m = q * normalized_bayer_matrix(n - 1) + return np.bmat(((m-1.5, m+0.5), (m+1.5, m-0.5))) / q + + num_colors = len(pal_im.getpalette()) // 3 + spread = 2 * 256 / num_colors + bayer_n = int(math.log2(order)) + bayer_matrix = torch.from_numpy(spread * normalized_bayer_matrix(bayer_n) + 0.5) + + result = torch.from_numpy(np.array(im).astype(np.float32)) + tw = math.ceil(result.shape[0] / bayer_matrix.shape[0]) + th = math.ceil(result.shape[1] / bayer_matrix.shape[1]) + tiled_matrix = bayer_matrix.tile(tw, th).unsqueeze(-1) + result.add_(tiled_matrix[:result.shape[0],:result.shape[1]]).clamp_(0, 255) + result = result.to(dtype=torch.uint8) + + im = Image.fromarray(result.cpu().numpy()) + im = im.quantize(palette=pal_im, dither=Image.Dither.NONE) + return im + + def quantize(self, image: torch.Tensor, colors: int, dither: str): batch_size, height, width, _ = image.shape result = torch.zeros_like(image) - dither_option = Image.Dither.FLOYDSTEINBERG if dither == "floyd-steinberg" else Image.Dither.NONE - for b in range(batch_size): - tensor_image = image[b] - img = (tensor_image * 255).to(torch.uint8).numpy() - pil_image = Image.fromarray(img, mode='RGB') + im = Image.fromarray((image[b] * 255).to(torch.uint8).numpy(), mode='RGB') - palette = pil_image.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836 - quantized_image = pil_image.quantize(colors=colors, palette=palette, dither=dither_option) + pal_im = im.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836 + + if dither == "none": + quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.NONE) + elif dither == "floyd-steinberg": + quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.FLOYDSTEINBERG) + elif dither.startswith("bayer"): + order = int(dither.split('-')[-1]) + quantized_image = Quantize.bayer(im, pal_im, order) quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255 result[b] = quantized_array From 1777b54d0217e77a6a64b0a587b9b11a48e3bf02 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 31 Oct 2023 17:33:43 -0400 Subject: [PATCH 044/170] Sampling code changes. apply_model in model_base now returns the denoised output. This means that sampling_function now computes things on the denoised output instead of the model output. This should make things more consistent across current and future models. 
--- comfy/extra_samplers/uni_pc.py | 8 ++- comfy/model_base.py | 121 ++++++++++++++++++++++++++------- comfy/samplers.py | 72 +++++++++----------- 3 files changed, 136 insertions(+), 65 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 9d5f0c60b..1a7a83929 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -852,6 +852,12 @@ class SigmaConvert: log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) return log_mean_coeff - log_std +def predict_eps_sigma(model, input, sigma_in, **kwargs): + sigma = sigma_in.view(sigma_in.shape[:1] + (1,) * (input.ndim - 1)) + input = input * ((sigma ** 2 + 1.0) ** 0.5) + return (input - model(input, sigma_in, **kwargs)) / sigma + + def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): timesteps = sigmas.clone() if sigmas[-1] == 0: @@ -874,7 +880,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex model_type = "noise" model_fn = model_wrapper( - model.predict_eps_sigma, + lambda input, sigma, **kwargs: predict_eps_sigma(model, input, sigma, **kwargs), ns, model_type=model_type, guidance_type="uncond", diff --git a/comfy/model_base.py b/comfy/model_base.py index ea3ea61f2..b8d04a2c8 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -13,25 +13,31 @@ class ModelType(Enum): EPS = 1 V_PREDICTION = 2 -class BaseModel(torch.nn.Module): - def __init__(self, model_config, model_type=ModelType.EPS, device=None): + +#NOTE: all this sampling stuff will be moved +class EPS: + def calculate_input(self, sigma, noise): + sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input - model_output * sigma + + +class V_PREDICTION(EPS): + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + +class ModelSamplingDiscrete(torch.nn.Module): + def __init__(self, model_config): super().__init__() + self._register_schedule(given_betas=None, beta_schedule=model_config.beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + self.sigma_data = 1.0 - unet_config = model_config.unet_config - self.latent_format = model_config.latent_format - self.model_config = model_config - self.register_schedule(given_betas=None, beta_schedule=model_config.beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) - if not unet_config.get("disable_unet_model_creation", False): - self.diffusion_model = UNetModel(**unet_config, device=device) - self.model_type = model_type - self.adm_channels = unet_config.get("adm_in_channels", None) - if self.adm_channels is None: - self.adm_channels = 0 - self.inpaint_model = False - print("model_type", model_type.name) - print("adm", self.adm_channels) - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if given_betas is not None: betas = 
given_betas @@ -39,31 +45,94 @@ class BaseModel(torch.nn.Module): betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end - self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) - self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) - self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) + # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) + + sigmas = torch.tensor(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, dtype=torch.float32) + + self.register_buffer('sigmas', sigmas) + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + log_sigma = sigma.log() + dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape) + + def sigma(self, timestep): + t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) + low_idx = t.floor().long() + high_idx = t.ceil().long() + w = t.frac() + log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] + return log_sigma.exp() + +def model_sampling(model_config, model_type): + if model_type == ModelType.EPS: + c = EPS + elif model_type == ModelType.V_PREDICTION: + c = V_PREDICTION + + s = ModelSamplingDiscrete + + class ModelSampling(s, c): + pass + + return ModelSampling(model_config) + + + +class BaseModel(torch.nn.Module): + def __init__(self, model_config, model_type=ModelType.EPS, device=None): + super().__init__() + + unet_config = model_config.unet_config + self.latent_format = model_config.latent_format + self.model_config = model_config + + if not unet_config.get("disable_unet_model_creation", False): + self.diffusion_model = UNetModel(**unet_config, device=device) + self.model_type = model_type + self.model_sampling = model_sampling(model_config, model_type) + + self.adm_channels = unet_config.get("adm_in_channels", None) + if self.adm_channels is None: + self.adm_channels = 0 + self.inpaint_model = False + print("model_type", model_type.name) + print("adm", self.adm_channels) def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs): + sigma = t + xc = self.model_sampling.calculate_input(sigma, x) if c_concat is not None: - xc = torch.cat([x] + [c_concat], dim=1) - else: - xc = x + xc = torch.cat([xc] + [c_concat], dim=1) + context = c_crossattn dtype = self.get_dtype() xc = xc.to(dtype) - t = t.to(dtype) + t = self.model_sampling.timestep(t).to(dtype) context = context.to(dtype) extra_conds = {} for o in kwargs: extra_conds[o] = kwargs[o].to(dtype) - return self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() + model_output = self.diffusion_model(xc, t, context=context, control=control, 
transformer_options=transformer_options, **extra_conds).float() + return self.model_sampling.calculate_denoised(sigma, model_output, x) def get_dtype(self): return self.diffusion_model.dtype diff --git a/comfy/samplers.py b/comfy/samplers.py index f930aa39b..5f9c74557 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -13,7 +13,7 @@ import comfy.conds #The main sampling function shared by all the samplers -#Returns predicted noise +#Returns denoised def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) @@ -257,24 +257,15 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod else: return uncond + (cond - uncond) * cond_scale - -class CompVisVDenoiser(k_diffusion_external.DiscreteVDDPMDenoiser): - def __init__(self, model, quantize=False, device='cpu'): - super().__init__(model, model.alphas_cumprod, quantize=quantize) - - def get_v(self, x, t, cond, **kwargs): - return self.inner_model.apply_model(x, t, cond, **kwargs) - - class CFGNoisePredictor(torch.nn.Module): def __init__(self, model): super().__init__() self.inner_model = model - self.alphas_cumprod = model.alphas_cumprod def apply_model(self, x, timestep, cond, uncond, cond_scale, model_options={}, seed=None): out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) return out - + def forward(self, *args, **kwargs): + return self.apply_model(*args, **kwargs) class KSamplerX0Inpaint(torch.nn.Module): def __init__(self, model): @@ -293,32 +284,40 @@ class KSamplerX0Inpaint(torch.nn.Module): return out def simple_scheduler(model, steps): + s = model.model_sampling sigs = [] - ss = len(model.sigmas) / steps + ss = len(s.sigmas) / steps for x in range(steps): - sigs += [float(model.sigmas[-(1 + int(x * ss))])] + sigs += [float(s.sigmas[-(1 + int(x * ss))])] sigs += [0.0] return torch.FloatTensor(sigs) def ddim_scheduler(model, steps): + s = model.model_sampling sigs = [] - ddim_timesteps = make_ddim_timesteps(ddim_discr_method="uniform", num_ddim_timesteps=steps, num_ddpm_timesteps=model.inner_model.inner_model.num_timesteps, verbose=False) - for x in range(len(ddim_timesteps) - 1, -1, -1): - ts = ddim_timesteps[x] - if ts > 999: - ts = 999 - sigs.append(model.t_to_sigma(torch.tensor(ts))) + ss = len(s.sigmas) // steps + x = 1 + while x < len(s.sigmas): + sigs += [float(s.sigmas[x])] + x += ss + sigs = sigs[::-1] sigs += [0.0] return torch.FloatTensor(sigs) -def sgm_scheduler(model, steps): +def normal_scheduler(model, steps, sgm=False, floor=False): + s = model.model_sampling + start = s.timestep(s.sigma_max) + end = s.timestep(s.sigma_min) + + if sgm: + timesteps = torch.linspace(start, end, steps + 1)[:-1] + else: + timesteps = torch.linspace(start, end, steps) + sigs = [] - timesteps = torch.linspace(model.inner_model.inner_model.num_timesteps - 1, 0, steps + 1)[:-1].type(torch.int) for x in range(len(timesteps)): ts = timesteps[x] - if ts > 999: - ts = 999 - sigs.append(model.t_to_sigma(torch.tensor(ts))) + sigs.append(s.sigma(ts)) sigs += [0.0] return torch.FloatTensor(sigs) @@ -508,7 +507,9 @@ class Sampler: pass def max_denoise(self, model_wrap, sigmas): - return math.isclose(float(model_wrap.sigma_max), float(sigmas[0]), rel_tol=1e-05) + max_sigma = float(model_wrap.inner_model.model_sampling.sigma_max) + sigma = float(sigmas[0]) + return math.isclose(max_sigma, sigma, 
rel_tol=1e-05) or sigma > max_sigma class DDIM(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): @@ -592,11 +593,7 @@ def ksampler(sampler_name, extra_options={}): def wrap_model(model): model_denoise = CFGNoisePredictor(model) - if model.model_type == model_base.ModelType.V_PREDICTION: - model_wrap = CompVisVDenoiser(model_denoise, quantize=True) - else: - model_wrap = k_diffusion_external.CompVisDenoiser(model_denoise, quantize=True) - return model_wrap + return model_denoise def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model_options={}, latent_image=None, denoise_mask=None, callback=None, disable_pbar=False, seed=None): positive = positive[:] @@ -637,19 +634,18 @@ SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", " SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"] def calculate_sigmas_scheduler(model, scheduler_name, steps): - model_wrap = wrap_model(model) if scheduler_name == "karras": - sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model_wrap.sigma_min), sigma_max=float(model_wrap.sigma_max)) + sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model.model_sampling.sigma_min), sigma_max=float(model.model_sampling.sigma_max)) elif scheduler_name == "exponential": - sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=float(model_wrap.sigma_min), sigma_max=float(model_wrap.sigma_max)) + sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=float(model.model_sampling.sigma_min), sigma_max=float(model.model_sampling.sigma_max)) elif scheduler_name == "normal": - sigmas = model_wrap.get_sigmas(steps) + sigmas = normal_scheduler(model, steps) elif scheduler_name == "simple": - sigmas = simple_scheduler(model_wrap, steps) + sigmas = simple_scheduler(model, steps) elif scheduler_name == "ddim_uniform": - sigmas = ddim_scheduler(model_wrap, steps) + sigmas = ddim_scheduler(model, steps) elif scheduler_name == "sgm_uniform": - sigmas = sgm_scheduler(model_wrap, steps) + sigmas = normal_scheduler(model, steps, sgm=True) else: print("error invalid scheduler", self.scheduler) return sigmas From a268a574fab025deed91f5201910ac052132c42c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 31 Oct 2023 18:11:29 -0400 Subject: [PATCH 045/170] Remove a bunch of useless code. DDIM is the same as euler with a small difference in the inpaint code. DDIM uses randn_like but I set a fixed seed instead. I'm keeping it in because I'm sure if I remove it people are going to complain. 
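For reference, the equivalence mentioned above: in the sigma-space formulation the remaining samplers use, the deterministic (eta=0) DDIM update is an Euler step. A sketch of that step, with illustrative names (denoised stands in for the wrapped model output at the current sigma):

    def euler_step(x, sigma, sigma_next, denoised):
        # dx/dsigma for the probability-flow ODE
        d = (x - denoised) / sigma
        # DDIM with eta=0 advances along this same straight line
        return x + d * (sigma_next - sigma)
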
--- comfy/ldm/models/diffusion/__init__.py | 0 comfy/ldm/models/diffusion/ddim.py | 418 ------ .../models/diffusion/dpm_solver/__init__.py | 1 - .../models/diffusion/dpm_solver/dpm_solver.py | 1163 ----------------- .../models/diffusion/dpm_solver/sampler.py | 96 -- comfy/ldm/models/diffusion/plms.py | 245 ---- comfy/ldm/models/diffusion/sampling_util.py | 22 - comfy/samplers.py | 47 +- 8 files changed, 7 insertions(+), 1985 deletions(-) delete mode 100644 comfy/ldm/models/diffusion/__init__.py delete mode 100644 comfy/ldm/models/diffusion/ddim.py delete mode 100644 comfy/ldm/models/diffusion/dpm_solver/__init__.py delete mode 100644 comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py delete mode 100644 comfy/ldm/models/diffusion/dpm_solver/sampler.py delete mode 100644 comfy/ldm/models/diffusion/plms.py delete mode 100644 comfy/ldm/models/diffusion/sampling_util.py diff --git a/comfy/ldm/models/diffusion/__init__.py b/comfy/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/comfy/ldm/models/diffusion/ddim.py b/comfy/ldm/models/diffusion/ddim.py deleted file mode 100644 index 433d48e30..000000000 --- a/comfy/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,418 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm - -from comfy.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor - - -class DDIMSampler(object): - def __init__(self, model, schedule="linear", device=torch.device("cuda"), **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - self.device = device - self.parameterization = kwargs.get("parameterization", "eps") - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != self.device: - attr = attr.float().to(self.device) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - self.make_schedule_timesteps(ddim_timesteps, ddim_eta=ddim_eta, verbose=verbose) - - def make_schedule_timesteps(self, ddim_timesteps, ddim_eta=0., verbose=True): - self.ddim_timesteps = torch.tensor(ddim_timesteps) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device) - - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample_custom(self, - ddim_timesteps, - conditioning=None, - callback=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - dynamic_threshold=None, - ucg_schedule=None, - denoise_function=None, - extra_args=None, - to_zero=True, - end_step=None, - disable_pbar=False, - **kwargs - ): - self.make_schedule_timesteps(ddim_timesteps=ddim_timesteps, ddim_eta=eta, verbose=verbose) - samples, intermediates = self.ddim_sampling(conditioning, x_T.shape, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ucg_schedule=ucg_schedule, - denoise_function=denoise_function, - extra_args=extra_args, - to_zero=to_zero, - end_step=end_step, - disable_pbar=disable_pbar - ) - return samples, intermediates - - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- dynamic_threshold=None, - ucg_schedule=None, - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - elif isinstance(conditioning, list): - for ctmp in conditioning: - if ctmp.shape[0] != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ucg_schedule=ucg_schedule, - denoise_function=None, - extra_args=None - ) - return samples, intermediates - - def q_sample(self, x_start, t, noise=None): - if noise is None: - noise = torch.randn_like(x_start) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, - ucg_schedule=None, denoise_function=None, extra_args=None, to_zero=True, end_step=None, disable_pbar=False): - device = self.model.alphas_cumprod.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else timesteps.flip(0) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - # print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range[:end_step], desc='DDIM Sampler', total=end_step, disable=disable_pbar) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - if ucg_schedule is not None: - assert len(ucg_schedule) == len(time_range) - unconditional_guidance_scale = ucg_schedule[i] - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, denoise_function=denoise_function, extra_args=extra_args) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - if to_zero: - img = pred_x0 - else: - if ddim_use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - img /= sqrt_alphas_cumprod[index - 1] - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, - dynamic_threshold=None, denoise_function=None, extra_args=None): - b, *_, device = *x.shape, x.device - - if denoise_function is not None: - model_output = denoise_function(x, t, **extra_args) - elif unconditional_conditioning is None or unconditional_guidance_scale == 1.: - model_output = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - if isinstance(c, dict): - assert isinstance(unconditional_conditioning, dict) - c_in = dict() - for k in c: - if isinstance(c[k], list): - c_in[k] = [torch.cat([ - unconditional_conditioning[k][i], - c[k][i]]) for i in range(len(c[k]))] - else: - c_in[k] = torch.cat([ - unconditional_conditioning[k], - c[k]]) - elif isinstance(c, list): - c_in = list() - assert isinstance(unconditional_conditioning, list) - for i in range(len(c)): - c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) - else: - c_in = torch.cat([unconditional_conditioning, c]) - model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) - - if self.parameterization == "v": - e_t = extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * model_output + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x - else: - e_t = model_output - - if score_corrector is not None: - assert self.parameterization == "eps", 'not implemented' - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 
1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - if self.parameterization != "v": - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - else: - pred_x0 = extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * x - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * model_output - - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - - if dynamic_threshold is not None: - raise NotImplementedError() - - # direction pointing to x_t - dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - @torch.no_grad() - def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, - unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): - num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] - - assert t_enc <= num_reference_steps - num_steps = t_enc - - if use_original_steps: - alphas_next = self.alphas_cumprod[:num_steps] - alphas = self.alphas_cumprod_prev[:num_steps] - else: - alphas_next = self.ddim_alphas[:num_steps] - alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) - - x_next = x0 - intermediates = [] - inter_steps = [] - for i in tqdm(range(num_steps), desc='Encoding Image'): - t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) - if unconditional_guidance_scale == 1.: - noise_pred = self.model.apply_model(x_next, t, c) - else: - assert unconditional_conditioning is not None - e_t_uncond, noise_pred = torch.chunk( - self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), - torch.cat((unconditional_conditioning, c))), 2) - noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) - - xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next - weighted_noise_pred = alphas_next[i].sqrt() * ( - (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred - x_next = xt_weighted + weighted_noise_pred - if return_intermediates and i % ( - num_steps // return_intermediates) == 0 and i < num_steps - 1: - intermediates.append(x_next) - inter_steps.append(i) - elif return_intermediates and i >= num_steps - 2: - intermediates.append(x_next) - inter_steps.append(i) - if callback: callback(i) - - out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} - if return_intermediates: - out.update({'intermediates': intermediates}) - return x_next, out - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None, max_denoise=False): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - if max_denoise: - noise_multiplier = 1.0 - else: - noise_multiplier = extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) - - return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + noise_multiplier * noise) - - 
@torch.no_grad() - def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - use_original_steps=False, callback=None): - - timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) - x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - if callback: callback(i) - return x_dec \ No newline at end of file diff --git a/comfy/ldm/models/diffusion/dpm_solver/__init__.py b/comfy/ldm/models/diffusion/dpm_solver/__init__.py deleted file mode 100644 index 7427f38c0..000000000 --- a/comfy/ldm/models/diffusion/dpm_solver/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py b/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py deleted file mode 100644 index da8d41f9c..000000000 --- a/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py +++ /dev/null @@ -1,1163 +0,0 @@ -import torch -import torch.nn.functional as F -import math -from tqdm import tqdm - - -class NoiseScheduleVP: - def __init__( - self, - schedule='discrete', - betas=None, - alphas_cumprod=None, - continuous_beta_0=0.1, - continuous_beta_1=20., - ): - """Create a wrapper class for the forward SDE (VP type). - *** - Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. - We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. - *** - The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). - We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). - Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have: - log_alpha_t = self.marginal_log_mean_coeff(t) - sigma_t = self.marginal_std(t) - lambda_t = self.marginal_lambda(t) - Moreover, as lambda(t) is an invertible function, we also support its inverse function: - t = self.inverse_lambda(lambda_t) - =============================================================== - We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). - 1. For discrete-time DPMs: - For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: - t_i = (i + 1) / N - e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. - We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. - Args: - betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) - alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) - Note that we always have alphas_cumprod = cumprod(betas). 
Therefore, we only need to set one of `betas` and `alphas_cumprod`. - **Important**: Please pay special attention for the args for `alphas_cumprod`: - The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that - q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). - Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have - alpha_{t_n} = \sqrt{\hat{alpha_n}}, - and - log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). - 2. For continuous-time DPMs: - We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise - schedule are the default settings in DDPM and improved-DDPM: - Args: - beta_min: A `float` number. The smallest beta for the linear schedule. - beta_max: A `float` number. The largest beta for the linear schedule. - cosine_s: A `float` number. The hyperparameter in the cosine schedule. - cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. - T: A `float` number. The ending time of the forward process. - =============================================================== - Args: - schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, - 'linear' or 'cosine' for continuous-time DPMs. - Returns: - A wrapper object of the forward SDE (VP type). - - =============================================================== - Example: - # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', betas=betas) - # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) - # For continuous-time DPMs (VPSDE), linear schedule: - >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) - """ - - if schedule not in ['discrete', 'linear', 'cosine']: - raise ValueError( - "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format( - schedule)) - - self.schedule = schedule - if schedule == 'discrete': - if betas is not None: - log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) - else: - assert alphas_cumprod is not None - log_alphas = 0.5 * torch.log(alphas_cumprod) - self.total_N = len(log_alphas) - self.T = 1. - self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) - self.log_alpha_array = log_alphas.reshape((1, -1,)) - else: - self.total_N = 1000 - self.beta_0 = continuous_beta_0 - self.beta_1 = continuous_beta_1 - self.cosine_s = 0.008 - self.cosine_beta_max = 999. - self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * ( - 1. + self.cosine_s) / math.pi - self.cosine_s - self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) - self.schedule = schedule - if schedule == 'cosine': - # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. - # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. - self.T = 0.9946 - else: - self.T = 1. - - def marginal_log_mean_coeff(self, t): - """ - Compute log(alpha_t) of a given continuous-time label t in [0, T]. 
- """ - if self.schedule == 'discrete': - return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), - self.log_alpha_array.to(t.device)).reshape((-1)) - elif self.schedule == 'linear': - return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 - elif self.schedule == 'cosine': - log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) - log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 - return log_alpha_t - - def marginal_alpha(self, t): - """ - Compute alpha_t of a given continuous-time label t in [0, T]. - """ - return torch.exp(self.marginal_log_mean_coeff(t)) - - def marginal_std(self, t): - """ - Compute sigma_t of a given continuous-time label t in [0, T]. - """ - return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) - - def marginal_lambda(self, t): - """ - Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. - """ - log_mean_coeff = self.marginal_log_mean_coeff(t) - log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) - return log_mean_coeff - log_std - - def inverse_lambda(self, lamb): - """ - Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. - """ - if self.schedule == 'linear': - tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - Delta = self.beta_0 ** 2 + tmp - return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) - elif self.schedule == 'discrete': - log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) - t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), - torch.flip(self.t_array.to(lamb.device), [1])) - return t.reshape((-1,)) - else: - log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * ( - 1. + self.cosine_s) / math.pi - self.cosine_s - t = t_fn(log_alpha) - return t - - -def model_wrapper( - model, - noise_schedule, - model_type="noise", - model_kwargs={}, - guidance_type="uncond", - condition=None, - unconditional_condition=None, - guidance_scale=1., - classifier_fn=None, - classifier_kwargs={}, -): - """Create a wrapper function for the noise prediction model. - DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to - firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. - We support four types of the diffusion model by setting `model_type`: - 1. "noise": noise prediction model. (Trained by predicting noise). - 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). - 3. "v": velocity prediction model. (Trained by predicting the velocity). - The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. - [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." - arXiv preprint arXiv:2202.00512 (2022). - [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." - arXiv preprint arXiv:2210.02303 (2022). - - 4. "score": marginal score function. (Trained by denoising score matching). 
- Note that the score function and the noise prediction model follows a simple relationship: - ``` - noise(x_t, t) = -sigma_t * score(x_t, t) - ``` - We support three types of guided sampling by DPMs by setting `guidance_type`: - 1. "uncond": unconditional sampling by DPMs. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - The input `classifier_fn` has the following format: - `` - classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) - `` - [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," - in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. - 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. - The input `model` has the following format: - `` - model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score - `` - And if cond == `unconditional_condition`, the model output is the unconditional DPM output. - [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." - arXiv preprint arXiv:2207.12598 (2022). - - The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) - or continuous-time labels (i.e. epsilon to T). - We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: - `` - def model_fn(x, t_continuous) -> noise: - t_input = get_model_input_time(t_continuous) - return noise_pred(model, x, t_input, **model_kwargs) - `` - where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. - =============================================================== - Args: - model: A diffusion model with the corresponding format described above. - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - model_type: A `str`. The parameterization type of the diffusion model. - "noise" or "x_start" or "v" or "score". - model_kwargs: A `dict`. A dict for the other inputs of the model function. - guidance_type: A `str`. The type of the guidance for sampling. - "uncond" or "classifier" or "classifier-free". - condition: A pytorch tensor. The condition for the guided sampling. - Only used for "classifier" or "classifier-free" guidance type. - unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. - Only used for "classifier-free" guidance type. - guidance_scale: A `float`. The scale for the guided sampling. - classifier_fn: A classifier function. Only used for the classifier guidance. - classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. - Returns: - A noise prediction model that accepts the noised data and the continuous time as the inputs. - """ - - def get_model_input_time(t_continuous): - """ - Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. - For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. - For continuous-time DPMs, we just use `t_continuous`. - """ - if noise_schedule.schedule == 'discrete': - return (t_continuous - 1. / noise_schedule.total_N) * 1000. 
- else: - return t_continuous - - def noise_pred_fn(x, t_continuous, cond=None): - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - t_input = get_model_input_time(t_continuous) - if cond is None: - output = model(x, t_input, **model_kwargs) - else: - output = model(x, t_input, cond, **model_kwargs) - if model_type == "noise": - return output - elif model_type == "x_start": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) - elif model_type == "v": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x - elif model_type == "score": - sigma_t = noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return -expand_dims(sigma_t, dims) * output - - def cond_grad_fn(x, t_input): - """ - Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). - """ - with torch.enable_grad(): - x_in = x.detach().requires_grad_(True) - log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) - return torch.autograd.grad(log_prob.sum(), x_in)[0] - - def model_fn(x, t_continuous): - """ - The noise predicition model function that is used for DPM-Solver. - """ - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - if guidance_type == "uncond": - return noise_pred_fn(x, t_continuous) - elif guidance_type == "classifier": - assert classifier_fn is not None - t_input = get_model_input_time(t_continuous) - cond_grad = cond_grad_fn(x, t_input) - sigma_t = noise_schedule.marginal_std(t_continuous) - noise = noise_pred_fn(x, t_continuous) - return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad - elif guidance_type == "classifier-free": - if guidance_scale == 1. or unconditional_condition is None: - return noise_pred_fn(x, t_continuous, cond=condition) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t_continuous] * 2) - if isinstance(condition, dict): - assert isinstance(unconditional_condition, dict) - c_in = dict() - for k in condition: - if isinstance(condition[k], list): - c_in[k] = [torch.cat([unconditional_condition[k][i], condition[k][i]]) for i in range(len(condition[k]))] - else: - c_in[k] = torch.cat([unconditional_condition[k], condition[k]]) - else: - c_in = torch.cat([unconditional_condition, condition]) - noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) - return noise_uncond + guidance_scale * (noise - noise_uncond) - - assert model_type in ["noise", "x_start", "v"] - assert guidance_type in ["uncond", "classifier", "classifier-free"] - return model_fn - - -class DPM_Solver: - def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): - """Construct a DPM-Solver. - We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). - If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). - If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). - In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. - The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. 
- Args: - model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): - `` - def model_fn(x, t_continuous): - return noise - `` - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. - thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. - max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. - - [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. - """ - self.model = model_fn - self.noise_schedule = noise_schedule - self.predict_x0 = predict_x0 - self.thresholding = thresholding - self.max_val = max_val - - def noise_prediction_fn(self, x, t): - """ - Return the noise prediction model. - """ - return self.model(x, t) - - def data_prediction_fn(self, x, t): - """ - Return the data prediction model (with thresholding). - """ - noise = self.noise_prediction_fn(x, t) - dims = x.dim() - alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) - x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) - if self.thresholding: - p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. - s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) - s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) - x0 = torch.clamp(x0, -s, s) / s - return x0 - - def model_fn(self, x, t): - """ - Convert the model to the noise prediction model or the data prediction model. - """ - if self.predict_x0: - return self.data_prediction_fn(x, t) - else: - return self.noise_prediction_fn(x, t) - - def get_time_steps(self, skip_type, t_T, t_0, N, device): - """Compute the intermediate time steps for sampling. - Args: - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - N: A `int`. The total number of the spacing of the time steps. - device: A torch device. - Returns: - A pytorch tensor of the time steps, with the shape (N + 1,). - """ - if skip_type == 'logSNR': - lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) - lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) - logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) - return self.noise_schedule.inverse_lambda(logSNR_steps) - elif skip_type == 'time_uniform': - return torch.linspace(t_T, t_0, N + 1).to(device) - elif skip_type == 'time_quadratic': - t_order = 2 - t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. 
/ t_order), N + 1).pow(t_order).to(device) - return t - else: - raise ValueError( - "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) - - def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): - """ - Get the order of each step for sampling by the singlestep DPM-Solver. - We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". - Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: - - If order == 1: - We take `steps` of DPM-Solver-1 (i.e. DDIM). - - If order == 2: - - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. - - If steps % 2 == 0, we use K steps of DPM-Solver-2. - - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If order == 3: - - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. - - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. - ============================================ - Args: - order: A `int`. The max order for the solver (2 or 3). - steps: A `int`. The total number of function evaluations (NFE). - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - device: A torch device. - Returns: - orders: A list of the solver order of each step. - """ - if order == 3: - K = steps // 3 + 1 - if steps % 3 == 0: - orders = [3, ] * (K - 2) + [2, 1] - elif steps % 3 == 1: - orders = [3, ] * (K - 1) + [1] - else: - orders = [3, ] * (K - 1) + [2] - elif order == 2: - if steps % 2 == 0: - K = steps // 2 - orders = [2, ] * K - else: - K = steps // 2 + 1 - orders = [2, ] * (K - 1) + [1] - elif order == 1: - K = 1 - orders = [1, ] * steps - else: - raise ValueError("'order' must be '1' or '2' or '3'.") - if skip_type == 'logSNR': - # To reproduce the results in DPM-Solver paper - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) - else: - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ - torch.cumsum(torch.tensor([0, ] + orders)).to(device)] - return timesteps_outer, orders - - def denoise_to_zero_fn(self, x, s): - """ - Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. - """ - return self.data_prediction_fn(x, s) - - def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): - """ - DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. 
- return_intermediate: A `bool`. If true, also return the model value at time `s`. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - if self.predict_x0: - phi_1 = torch.expm1(-h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - else: - phi_1 = torch.expm1(h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - - def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, - solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-2 from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the second-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 0.5 - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - s1 = ns.inverse_lambda(lambda_s1) - log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff( - s1), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) - alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_1 = torch.expm1(-h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) 
/ h + 1.), dims) * ( - model_s1 - model_s) - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_1 = torch.expm1(h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) - ) - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1} - else: - return x_t - - def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None, - return_intermediate=False, solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-3 from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). - If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 1. / 3. - if r2 is None: - r2 = 2. / 3. - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - lambda_s2 = lambda_s + r2 * h - s1 = ns.inverse_lambda(lambda_s1) - s2 = ns.inverse_lambda(lambda_s2) - log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff( - s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std( - s2), ns.marginal_std(t) - alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_12 = torch.expm1(-r2 * h) - phi_1 = torch.expm1(-h) - phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. - phi_2 = phi_1 / h + 1. 
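            # A quick numeric aside (restating the definitions above): written out,
            #   phi_1 = e^{-h} - 1
            #   phi_2 = (e^{-h} - 1 + h) / h
            #   phi_3 = (e^{-h} - 1 + h - h^2/2) / h^2,
            # i.e. successive Taylor remainders of e^{-h}. For h = 0.5 this
            # gives phi_1 ~= -0.3935, phi_2 ~= 0.2131, phi_3 ~= -0.0739,
            # which the torch.expm1-based lines here reproduce.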
- phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(sigma_s2 / sigma_s, dims) * x - - expand_dims(alpha_s2 * phi_12, dims) * model_s - + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. * (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + expand_dims(alpha_t * phi_2, dims) * D1 - - expand_dims(alpha_t * phi_3, dims) * D2 - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_12 = torch.expm1(r2 * h) - phi_1 = torch.expm1(h) - phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. - phi_2 = phi_1 / h - 1. - phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x - - expand_dims(sigma_s2 * phi_12, dims) * model_s - - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. * (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - expand_dims(sigma_t * phi_2, dims) * D1 - - expand_dims(sigma_t * phi_3, dims) * D2 - ) - - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} - else: - return x_t - - def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): - """ - Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. 
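        Example (an illustrative sketch; `solver` and the cached
        `model_prev_*` / `t_prev_*` tensors are hypothetical names):
            >>> # two cached evaluations advance x by one second-order step
            >>> x = solver.multistep_dpm_solver_second_update(
            ...     x, [model_prev_1, model_prev_0], [t_prev_1, t_prev_0], t)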
- """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - ns = self.noise_schedule - dims = x.dim() - model_prev_1, model_prev_0 = model_prev_list - t_prev_1, t_prev_0 = t_prev_list - lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( - t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0 = h_0 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - if self.predict_x0: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 - ) - else: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 - ) - return x_t - - def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): - """ - Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - model_prev_2, model_prev_1, model_prev_0 = model_prev_list - t_prev_2, t_prev_1, t_prev_0 = t_prev_list - lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda( - t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_1 = lambda_prev_1 - lambda_prev_2 - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0, r1 = h_0 / h, h_1 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) - D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) - D2 = expand_dims(1. 
/ (r0 + r1), dims) * (D1_0 - D1_1) - if self.predict_x0: - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 - - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2 - ) - else: - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 - - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2 - ) - return x_t - - def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, - r2=None): - """ - Singlestep DPM-Solver with the order `order` from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - r1: A `float`. The hyperparameter of the second-order or third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if order == 1: - return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) - elif order == 2: - return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, - solver_type=solver_type, r1=r1) - elif order == 3: - return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, - solver_type=solver_type, r1=r1, r2=r2) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): - """ - Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. 
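        Example (illustrative; `solver` is a hypothetical DPM_Solver instance,
        mirroring the warm-up loop in `sample` below):
            >>> # while the history holds fewer than `order` values, lower
            >>> # orders are used before switching to the target order:
            >>> for init_order in range(1, order):
            ...     x = solver.multistep_dpm_solver_update(
            ...         x, model_prev_list, t_prev_list, vec_t, init_order)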
- """ - if order == 1: - return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) - elif order == 2: - return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - elif order == 3: - return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, - solver_type='dpm_solver'): - """ - The adaptive step size solver based on singlestep DPM-Solver. - Args: - x: A pytorch tensor. The initial value at time `t_T`. - order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - h_init: A `float`. The initial step size (for logSNR). - atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. - rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. - theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. - t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the - current time and `t_0` is less than `t_err`. The default setting is 1e-5. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_0: A pytorch tensor. The approximated solution at time `t_0`. - [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. - """ - ns = self.noise_schedule - s = t_T * torch.ones((x.shape[0],)).to(x) - lambda_s = ns.marginal_lambda(s) - lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) - h = h_init * torch.ones_like(s).to(x) - x_prev = x - nfe = 0 - if order == 2: - r1 = 0.5 - lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, - solver_type=solver_type, - **kwargs) - elif order == 3: - r1, r2 = 1. / 3., 2. / 3. - lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, - return_intermediate=True, - solver_type=solver_type) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, - solver_type=solver_type, - **kwargs) - else: - raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) - while torch.abs((s - t_0)).mean() > t_err: - t = ns.inverse_lambda(lambda_s + h) - x_lower, lower_noise_kwargs = lower_update(x, s, t) - x_higher = higher_update(x, s, t, **lower_noise_kwargs) - delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) - norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) - E = norm_fn((x_higher - x_lower) / delta).max() - if torch.all(E <= 1.): - x = x_higher - s = t - x_prev = x_lower - lambda_s = ns.marginal_lambda(s) - h = torch.min(theta * h * torch.float_power(E, -1. 
/ order).float(), lambda_0 - lambda_s)
-            nfe += order
-        print('adaptive solver nfe', nfe)
-        return x
-
-    def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
-               method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
-               atol=0.0078, rtol=0.05,
-               ):
-        """
-        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
-        =====================================================
-        We support the following algorithms for both noise prediction model and data prediction model:
-            - 'singlestep':
-                Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
-                We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
-                The total number of function evaluations (NFE) == `steps`.
-                Given a fixed NFE == `steps`, the sampling procedure is:
-                    - If `order` == 1:
-                        - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
-                    - If `order` == 2:
-                        - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
-                        - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
-                        - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
-                    - If `order` == 3:
-                        - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
-                        - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
-                        - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
-                        - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
-            - 'multistep':
-                Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
-                We initialize the first `order` values by lower order multistep solvers.
-                Given a fixed NFE == `steps`, the sampling procedure is:
-                    Denote K = steps.
-                    - If `order` == 1:
-                        - We use K steps of DPM-Solver-1 (i.e. DDIM).
-                    - If `order` == 2:
-                        - We first use 1 step of DPM-Solver-1, then (K - 1) steps of multistep DPM-Solver-2.
-                    - If `order` == 3:
-                        - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
-            - 'singlestep_fixed':
-                Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
-                We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
-            - 'adaptive':
-                Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
-                We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
-                You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
-                (NFE) and the sample quality.
-                    - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
-                    - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
-        =====================================================
-        Some advice for choosing the algorithm:
-            - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
-                Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`.
-                e.g.
-                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)
-                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
-                        skip_type='time_uniform', method='singlestep')
-            - For **guided sampling with large guidance scale** by DPMs:
-                Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.
-                e.g.
-                >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)
-                >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
-                        skip_type='time_uniform', method='multistep')
-        We support three types of `skip_type`:
-            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**
-            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.
-            - 'time_quadratic': quadratic time for the time steps.
-        =====================================================
-        Args:
-            x: A pytorch tensor. The initial value at time `t_start`
-                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
-            steps: An `int`. The total number of function evaluations (NFE).
-            t_start: A `float`. The starting time of the sampling.
-                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
-            t_end: A `float`. The ending time of the sampling.
-                If `t_end` is None, we use 1. / self.noise_schedule.total_N.
-                e.g. if total_N == 1000, we have `t_end` == 1e-3.
-                For discrete-time DPMs:
-                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
-                For continuous-time DPMs:
-                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
-            order: An `int`. The order of DPM-Solver.
-            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
-            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
-            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
-                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
-                This trick was first proposed by DDPM (https://arxiv.org/abs/2006.11239) and
-                score_sde (https://arxiv.org/abs/2011.13456). It can improve the FID
-                when sampling by diffusion SDEs for low-resolutional images
-                (such as CIFAR-10). However, we observed that it does not matter for
-                high-resolutional images. As it needs an additional NFE, we do not recommend
-                it for high-resolutional images.
-            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
-                Only valid for `method=multistep` and `steps < 15`. We empirically find that
-                this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
-                (especially for steps <= 10). So we recommend setting it to `True`.
-            solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.
-            atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
-            rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.
-        Returns:
-            x_end: A pytorch tensor. The approximated solution at time `t_end`.
-        """
-        t_0 = 1.
/ self.noise_schedule.total_N if t_end is None else t_end - t_T = self.noise_schedule.T if t_start is None else t_start - device = x.device - if method == 'adaptive': - with torch.no_grad(): - x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, - solver_type=solver_type) - elif method == 'multistep': - assert steps >= order - timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) - assert timesteps.shape[0] - 1 == steps - with torch.no_grad(): - vec_t = timesteps[0].expand((x.shape[0])) - model_prev_list = [self.model_fn(x, vec_t)] - t_prev_list = [vec_t] - # Init the first `order` values by lower order multistep DPM-Solver. - for init_order in tqdm(range(1, order), desc="DPM init order"): - vec_t = timesteps[init_order].expand(x.shape[0]) - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, - solver_type=solver_type) - model_prev_list.append(self.model_fn(x, vec_t)) - t_prev_list.append(vec_t) - # Compute the remaining values by `order`-th order multistep DPM-Solver. - for step in tqdm(range(order, steps + 1), desc="DPM multistep"): - vec_t = timesteps[step].expand(x.shape[0]) - if lower_order_final and steps < 15: - step_order = min(order, steps + 1 - step) - else: - step_order = order - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, - solver_type=solver_type) - for i in range(order - 1): - t_prev_list[i] = t_prev_list[i + 1] - model_prev_list[i] = model_prev_list[i + 1] - t_prev_list[-1] = vec_t - # We do not need to evaluate the final model value. - if step < steps: - model_prev_list[-1] = self.model_fn(x, vec_t) - elif method in ['singlestep', 'singlestep_fixed']: - if method == 'singlestep': - timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, - skip_type=skip_type, - t_T=t_T, t_0=t_0, - device=device) - elif method == 'singlestep_fixed': - K = steps // order - orders = [order, ] * K - timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) - for i, order in enumerate(orders): - t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] - timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), - N=order, device=device) - lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) - vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) - h = lambda_inner[-1] - lambda_inner[0] - r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h - r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h - x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) - if denoise_to_zero: - x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) - return x - - -############################################################# -# other utility functions -############################################################# - -def interpolate_fn(x, xp, yp): - """ - A piecewise linear function y = f(x), using xp and yp as keypoints. - We implement f(x) in a differentiable way (i.e. applicable for autograd). - The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) - Args: - x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). 
-        xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
-        yp: PyTorch tensor with shape [C, K].
-    Returns:
-        The function values f(x), with shape [N, C].
-    """
-    N, K = x.shape[0], xp.shape[1]
-    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
-    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
-    x_idx = torch.argmin(x_indices, dim=2)
-    cand_start_idx = x_idx - 1
-    start_idx = torch.where(
-        torch.eq(x_idx, 0),
-        torch.tensor(1, device=x.device),
-        torch.where(
-            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
-        ),
-    )
-    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
-    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
-    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
-    start_idx2 = torch.where(
-        torch.eq(x_idx, 0),
-        torch.tensor(0, device=x.device),
-        torch.where(
-            torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
-        ),
-    )
-    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
-    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
-    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
-    cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
-    return cand
-
-
-def expand_dims(v, dims):
-    """
-    Expand the tensor `v` to `dims` dimensions.
-    Args:
-        `v`: a PyTorch tensor with shape [N].
-        `dims`: an `int`.
-    Returns:
-        a PyTorch tensor with shape [N, 1, 1, ..., 1] whose total number of dimensions is `dims`.
-    """
-    return v[(...,) + (None,) * (dims - 1)]
\ No newline at end of file
diff --git a/comfy/ldm/models/diffusion/dpm_solver/sampler.py b/comfy/ldm/models/diffusion/dpm_solver/sampler.py
deleted file mode 100644
index e4d0d0a38..000000000
--- a/comfy/ldm/models/diffusion/dpm_solver/sampler.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""SAMPLING ONLY."""
-import torch
-
-from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
-
-MODEL_TYPES = {
-    "eps": "noise",
-    "v": "v"
-}
-
-
-class DPMSolverSampler(object):
-    def __init__(self, model, device=torch.device("cuda"), **kwargs):
-        super().__init__()
-        self.model = model
-        self.device = device
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
-        self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
-
-    def register_buffer(self, name, attr):
-        if type(attr) == torch.Tensor:
-            if attr.device != self.device:
-                attr = attr.to(self.device)
-        setattr(self, name, attr)
-
-    @torch.no_grad()
-    def sample(self,
-               S,
-               batch_size,
-               shape,
-               conditioning=None,
-               callback=None,
-               normals_sequence=None,
-               img_callback=None,
-               quantize_x0=False,
-               eta=0.,
-               mask=None,
-               x0=None,
-               temperature=1.,
-               noise_dropout=0.,
-               score_corrector=None,
-               corrector_kwargs=None,
-               verbose=True,
-               x_T=None,
-               log_every_t=100,
-               unconditional_guidance_scale=1.,
-               unconditional_conditioning=None,
-               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] - if isinstance(ctmp, torch.Tensor): - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - elif isinstance(conditioning, list): - for ctmp in conditioning: - if ctmp.shape[0] != batch_size: - print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}") - else: - if isinstance(conditioning, torch.Tensor): - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - - print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') - - device = self.model.betas.device - if x_T is None: - img = torch.randn(size, device=device) - else: - img = x_T - - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) - - model_fn = model_wrapper( - lambda x, t, c: self.model.apply_model(x, t, c), - ns, - model_type=MODEL_TYPES[self.model.parameterization], - guidance_type="classifier-free", - condition=conditioning, - unconditional_condition=unconditional_conditioning, - guidance_scale=unconditional_guidance_scale, - ) - - dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) - x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, - lower_order_final=True) - - return x.to(device), None diff --git a/comfy/ldm/models/diffusion/plms.py b/comfy/ldm/models/diffusion/plms.py deleted file mode 100644 index 9d31b3994..000000000 --- a/comfy/ldm/models/diffusion/plms.py +++ /dev/null @@ -1,245 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like -from ldm.models.diffusion.sampling_util import norm_thresholding - - -class PLMSSampler(object): - def __init__(self, model, schedule="linear", device=torch.device("cuda"), **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - self.device = device - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != self.device: - attr = attr.to(self.device) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - if ddim_eta != 0: - raise ValueError('ddim_eta must be 0 for PLMS') - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - dynamic_threshold=None, - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') - - samples, intermediates = self.plms_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ) - return samples, intermediates - - @torch.no_grad() - def plms_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, - dynamic_threshold=None): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = 
self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running PLMS Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) - old_eps = [] - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. - mask) * img - - outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - old_eps=old_eps, t_next=ts_next, - dynamic_threshold=dynamic_threshold) - img, pred_x0, e_t = outs - old_eps.append(e_t) - if len(old_eps) >= 4: - old_eps.pop(0) - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, - dynamic_threshold=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = 
self.model.first_stage_model.quantize(pred_x0)
-            if dynamic_threshold is not None:
-                pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
-            # direction pointing to x_t
-            dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
-            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-            if noise_dropout > 0.:
-                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
-            return x_prev, pred_x0
-
-        e_t = get_model_output(x, t)
-        if len(old_eps) == 0:
-            # Pseudo Improved Euler (2nd order)
-            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
-            e_t_next = get_model_output(x_prev, t_next)
-            e_t_prime = (e_t + e_t_next) / 2
-        elif len(old_eps) == 1:
-            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (3 * e_t - old_eps[-1]) / 2
-        elif len(old_eps) == 2:
-            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
-        elif len(old_eps) >= 3:
-            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
-            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
-        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
-        return x_prev, pred_x0, e_t
diff --git a/comfy/ldm/models/diffusion/sampling_util.py b/comfy/ldm/models/diffusion/sampling_util.py
deleted file mode 100644
index 7eff02be6..000000000
--- a/comfy/ldm/models/diffusion/sampling_util.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import torch
-import numpy as np
-
-
-def append_dims(x, target_dims):
-    """Appends dimensions to the end of a tensor until it has target_dims dimensions.
-    From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py"""
-    dims_to_append = target_dims - x.ndim
-    if dims_to_append < 0:
-        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
-    return x[(...,) + (None,) * dims_to_append]
-
-
-def norm_thresholding(x0, value):
-    s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)
-    return x0 * (value / s)
-
-
-def spatial_norm_thresholding(x0, value):
-    # b c h w
-    s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)
-    return x0 * (value / s)
\ No newline at end of file
diff --git a/comfy/samplers.py b/comfy/samplers.py
index 5f9c74557..e10e02c41 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -4,8 +4,6 @@ from .extra_samplers import uni_pc
 import torch
 import enum
 from comfy import model_management
-from .ldm.models.diffusion.ddim import DDIMSampler
-from .ldm.modules.diffusionmodules.util import make_ddim_timesteps
 import math
 from comfy import model_base
 import comfy.utils
@@ -511,41 +509,6 @@ class Sampler:
         sigma = float(sigmas[0])
         return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma
 
-class DDIM(Sampler):
-    def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
-        timesteps = []
-        for s in range(sigmas.shape[0]):
-            timesteps.insert(0, model_wrap.sigma_to_discrete_timestep(sigmas[s]))
-        noise_mask = None
-        if denoise_mask is not None:
-            noise_mask = 1.0 - denoise_mask
-
-        ddim_callback = None
-        if callback is not None:
-            total_steps = len(timesteps) - 1
-            ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None, total_steps)
-
-        max_denoise = self.max_denoise(model_wrap, sigmas)
-
-        ddim_sampler = DDIMSampler(model_wrap.inner_model.inner_model, device=noise.device)
-        ddim_sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False)
-        z_enc =
ddim_sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(noise.device), noise=noise, max_denoise=max_denoise) - samples, _ = ddim_sampler.sample_custom(ddim_timesteps=timesteps, - batch_size=noise.shape[0], - shape=noise.shape[1:], - verbose=False, - eta=0.0, - x_T=z_enc, - x0=latent_image, - img_callback=ddim_callback, - denoise_function=model_wrap.predict_eps_discrete_timestep, - extra_args=extra_args, - mask=noise_mask, - to_zero=sigmas[-1]==0, - end_step=sigmas.shape[0] - 1, - disable_pbar=disable_pbar) - return samples - class UNIPC(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) @@ -558,13 +521,17 @@ KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral" "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm"] -def ksampler(sampler_name, extra_options={}): +def ksampler(sampler_name, extra_options={}, inpaint_options={}): class KSAMPLER(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): extra_args["denoise_mask"] = denoise_mask model_k = KSamplerX0Inpaint(model_wrap) model_k.latent_image = latent_image - model_k.noise = noise + if inpaint_options.get("random", False): #TODO: Should this be the default? + generator = torch.manual_seed(extra_args.get("seed", 41) + 1) + model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device) + else: + model_k.noise = noise if self.max_denoise(model_wrap, sigmas): noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) @@ -656,7 +623,7 @@ def sampler_class(name): elif name == "uni_pc_bh2": sampler = UNIPCBH2 elif name == "ddim": - sampler = DDIM + sampler = ksampler("euler", inpaint_options={"random": True}) else: sampler = ksampler(name) return sampler From 7c0f255de16b78e54e0c051e9f7e1e46c7422c6c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 31 Oct 2023 22:14:32 -0400 Subject: [PATCH 046/170] Clean up percent start/end and make controlnets work with sigmas. 
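The gist, as a sketch (names from the hunks below; `model` stands for any
loaded model whose `model_sampling` is a ModelSamplingDiscrete):

    # Percents are now mapped straight into sigma space once, up front:
    s = model.model_sampling
    gate = s.percent_to_sigma(0.2)  # == s.sigma(torch.tensor(0.2 * 999.0))
    # calculate_start_end_timesteps() stores such sigmas on each cond, and
    # controlnets receive the same percent_to_timestep_function, so every
    # downstream comparison happens against the sampler's native sigmas.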
--- comfy/controlnet.py | 14 +++++++++++++- comfy/model_base.py | 5 ++++- comfy/samplers.py | 16 +++++++++------- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 2a88dd019..098681582 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -132,6 +132,7 @@ class ControlNet(ControlBase): self.control_model = control_model self.control_model_wrapped = comfy.model_patcher.ModelPatcher(self.control_model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) self.global_average_pooling = global_average_pooling + self.model_sampling_current = None def get_control(self, x_noisy, t, cond, batched_number): control_prev = None @@ -159,7 +160,10 @@ class ControlNet(ControlBase): y = cond.get('y', None) if y is not None: y = y.to(self.control_model.dtype) - control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=t, context=context.to(self.control_model.dtype), y=y) + timestep = self.model_sampling_current.timestep(t) + x_noisy = self.model_sampling_current.calculate_input(t, x_noisy) + + control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(self.control_model.dtype), y=y) return self.control_merge(None, control, control_prev, output_dtype) def copy(self): @@ -172,6 +176,14 @@ class ControlNet(ControlBase): out.append(self.control_model_wrapped) return out + def pre_run(self, model, percent_to_timestep_function): + super().pre_run(model, percent_to_timestep_function) + self.model_sampling_current = model.model_sampling + + def cleanup(self): + self.model_sampling_current = None + super().cleanup() + class ControlLoraOps: class Linear(torch.nn.Module): def __init__(self, in_features: int, out_features: int, bias: bool = True, diff --git a/comfy/model_base.py b/comfy/model_base.py index b8d04a2c8..84cf9829d 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -82,6 +82,9 @@ class ModelSamplingDiscrete(torch.nn.Module): log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] return log_sigma.exp() + def percent_to_sigma(self, percent): + return self.sigma(torch.tensor(percent * 999.0)) + def model_sampling(model_config, model_type): if model_type == ModelType.EPS: c = EPS @@ -126,7 +129,7 @@ class BaseModel(torch.nn.Module): context = c_crossattn dtype = self.get_dtype() xc = xc.to(dtype) - t = self.model_sampling.timestep(t).to(dtype) + t = self.model_sampling.timestep(t).float() context = context.to(dtype) extra_conds = {} for o in kwargs: diff --git a/comfy/samplers.py b/comfy/samplers.py index e10e02c41..a74c8a1b8 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -415,15 +415,16 @@ def create_cond_with_same_area_if_none(conds, c): conds += [out] def calculate_start_end_timesteps(model, conds): + s = model.model_sampling for t in range(len(conds)): x = conds[t] timestep_start = None timestep_end = None if 'start_percent' in x: - timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['start_percent'] * 999.0))) + timestep_start = s.percent_to_sigma(x['start_percent']) if 'end_percent' in x: - timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['end_percent'] * 999.0))) + timestep_end = s.percent_to_sigma(x['end_percent']) if (timestep_start is not None) or (timestep_end is not None): n = x.copy() @@ -434,14 +435,15 @@ def calculate_start_end_timesteps(model, conds): conds[t] = n def 
pre_run_control(model, conds): + s = model.model_sampling for t in range(len(conds)): x = conds[t] timestep_start = None timestep_end = None - percent_to_timestep_function = lambda a: model.sigma_to_t(model.t_to_sigma(torch.tensor(a) * 999.0)) + percent_to_timestep_function = lambda a: s.percent_to_sigma(a) if 'control' in x: - x['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function) + x['control'].pre_run(model, percent_to_timestep_function) def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): cond_cnets = [] @@ -571,8 +573,8 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model model_wrap = wrap_model(model) - calculate_start_end_timesteps(model_wrap, negative) - calculate_start_end_timesteps(model_wrap, positive) + calculate_start_end_timesteps(model, negative) + calculate_start_end_timesteps(model, positive) #make sure each cond area has an opposite one with the same area for c in positive: @@ -580,7 +582,7 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model for c in negative: create_cond_with_same_area_if_none(positive, c) - pre_run_control(model_wrap, negative + positive) + pre_run_control(model, negative + positive) apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) From 111f1b525526a850cf222d2bccec0cdb3e2c988b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 31 Oct 2023 23:19:02 -0400 Subject: [PATCH 047/170] Fix some issues with sampling precision. --- comfy/model_base.py | 4 ++-- comfy/samplers.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 84cf9829d..37a52debf 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -44,7 +44,7 @@ class ModelSamplingDiscrete(torch.nn.Module): else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32) # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape @@ -56,7 +56,7 @@ class ModelSamplingDiscrete(torch.nn.Module): # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - sigmas = torch.tensor(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, dtype=torch.float32) + sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 self.register_buffer('sigmas', sigmas) self.register_buffer('log_sigmas', sigmas.log()) diff --git a/comfy/samplers.py b/comfy/samplers.py index a74c8a1b8..518b666db 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -137,10 +137,10 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) - out_count = torch.ones_like(x_in)/100000.0 + out_count = torch.zeros_like(x_in) out_uncond = torch.zeros_like(x_in) - out_uncond_count = torch.ones_like(x_in)/100000.0 + out_uncond_count = torch.zeros_like(x_in) COND = 0 UNCOND = 1 @@ -241,6 +241,8 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod out_uncond /= out_uncond_count del out_uncond_count + torch.nan_to_num(out_cond, nan=0.0, posinf=0.0, neginf=0.0, out=out_cond) #in case out_count or out_uncond_count had some zeros + torch.nan_to_num(out_uncond, nan=0.0, posinf=0.0, neginf=0.0, out=out_uncond) return out_cond, out_uncond From e73ec8c4dad72650e94a5c9fdad574b2d2dae66f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 00:01:30 -0400 Subject: [PATCH 048/170] Not used anymore. --- comfy/k_diffusion/external.py | 194 ---------------------------------- comfy/samplers.py | 1 - 2 files changed, 195 deletions(-) delete mode 100644 comfy/k_diffusion/external.py diff --git a/comfy/k_diffusion/external.py b/comfy/k_diffusion/external.py deleted file mode 100644 index 953d3db2c..000000000 --- a/comfy/k_diffusion/external.py +++ /dev/null @@ -1,194 +0,0 @@ -import math - -import torch -from torch import nn - -from . import sampling, utils - - -class VDenoiser(nn.Module): - """A v-diffusion-pytorch model wrapper for k-diffusion.""" - - def __init__(self, inner_model): - super().__init__() - self.inner_model = inner_model - self.sigma_data = 1. 
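    # With sigma_data = 1 the scalings below reduce to
    #   c_skip = 1 / (sigma^2 + 1)
    #   c_out  = -sigma / sqrt(sigma^2 + 1)
    #   c_in   = 1 / sqrt(sigma^2 + 1),
    # the v-prediction analogue of EDM-style preconditioning: the network is
    # fed the unit-variance input c_in * x, and its v output is folded back
    # into a denoised estimate as c_skip * x + c_out * v.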
- - def get_scalings(self, sigma): - c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - return c_skip, c_out, c_in - - def sigma_to_t(self, sigma): - return sigma.atan() / math.pi * 2 - - def t_to_sigma(self, t): - return (t * math.pi / 2).tan() - - def loss(self, input, noise, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - model_output = self.inner_model(noised_input * c_in, self.sigma_to_t(sigma), **kwargs) - target = (input - c_skip * noised_input) / c_out - return (model_output - target).pow(2).flatten(1).mean(1) - - def forward(self, input, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - return self.inner_model(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip - - -class DiscreteSchedule(nn.Module): - """A mapping between continuous noise levels (sigmas) and a list of discrete noise - levels.""" - - def __init__(self, sigmas, quantize): - super().__init__() - self.register_buffer('sigmas', sigmas) - self.register_buffer('log_sigmas', sigmas.log()) - self.quantize = quantize - - @property - def sigma_min(self): - return self.sigmas[0] - - @property - def sigma_max(self): - return self.sigmas[-1] - - def get_sigmas(self, n=None): - if n is None: - return sampling.append_zero(self.sigmas.flip(0)) - t_max = len(self.sigmas) - 1 - t = torch.linspace(t_max, 0, n, device=self.sigmas.device) - return sampling.append_zero(self.t_to_sigma(t)) - - def sigma_to_discrete_timestep(self, sigma): - log_sigma = sigma.log() - dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) - - def sigma_to_t(self, sigma, quantize=None): - quantize = self.quantize if quantize is None else quantize - if quantize: - return self.sigma_to_discrete_timestep(sigma) - log_sigma = sigma.log() - dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) - high_idx = low_idx + 1 - low, high = self.log_sigmas[low_idx], self.log_sigmas[high_idx] - w = (low - log_sigma) / (low - high) - w = w.clamp(0, 1) - t = (1 - w) * low_idx + w * high_idx - return t.view(sigma.shape) - - def t_to_sigma(self, t): - t = t.float() - low_idx = t.floor().long() - high_idx = t.ceil().long() - w = t-low_idx if t.device.type == 'mps' else t.frac() - log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() - - def predict_eps_discrete_timestep(self, input, t, **kwargs): - if t.dtype != torch.int64 and t.dtype != torch.int32: - t = t.round() - sigma = self.t_to_sigma(t) - input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) - return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) - - def predict_eps_sigma(self, input, sigma, **kwargs): - input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) - return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) - -class DiscreteEpsDDPMDenoiser(DiscreteSchedule): - """A wrapper for discrete schedule DDPM models that output eps (the predicted - noise).""" - - def __init__(self, model, alphas_cumprod, quantize): - 
super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize) - self.inner_model = model - self.sigma_data = 1. - - def get_scalings(self, sigma): - c_out = -sigma - c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - return c_out, c_in - - def get_eps(self, *args, **kwargs): - return self.inner_model(*args, **kwargs) - - def loss(self, input, noise, sigma, **kwargs): - c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - eps = self.get_eps(noised_input * c_in, self.sigma_to_t(sigma), **kwargs) - return (eps - noise).pow(2).flatten(1).mean(1) - - def forward(self, input, sigma, **kwargs): - c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs) - return input + eps * c_out - - -class OpenAIDenoiser(DiscreteEpsDDPMDenoiser): - """A wrapper for OpenAI diffusion models.""" - - def __init__(self, model, diffusion, quantize=False, has_learned_sigmas=True, device='cpu'): - alphas_cumprod = torch.tensor(diffusion.alphas_cumprod, device=device, dtype=torch.float32) - super().__init__(model, alphas_cumprod, quantize=quantize) - self.has_learned_sigmas = has_learned_sigmas - - def get_eps(self, *args, **kwargs): - model_output = self.inner_model(*args, **kwargs) - if self.has_learned_sigmas: - return model_output.chunk(2, dim=1)[0] - return model_output - - -class CompVisDenoiser(DiscreteEpsDDPMDenoiser): - """A wrapper for CompVis diffusion models.""" - - def __init__(self, model, quantize=False, device='cpu'): - super().__init__(model, model.alphas_cumprod, quantize=quantize) - - def get_eps(self, *args, **kwargs): - return self.inner_model.apply_model(*args, **kwargs) - - -class DiscreteVDDPMDenoiser(DiscreteSchedule): - """A wrapper for discrete schedule DDPM models that output v.""" - - def __init__(self, model, alphas_cumprod, quantize): - super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize) - self.inner_model = model - self.sigma_data = 1. 
- - def get_scalings(self, sigma): - c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - return c_skip, c_out, c_in - - def get_v(self, *args, **kwargs): - return self.inner_model(*args, **kwargs) - - def loss(self, input, noise, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - model_output = self.get_v(noised_input * c_in, self.sigma_to_t(sigma), **kwargs) - target = (input - c_skip * noised_input) / c_out - return (model_output - target).pow(2).flatten(1).mean(1) - - def forward(self, input, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - return self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip - - -class CompVisVDenoiser(DiscreteVDDPMDenoiser): - """A wrapper for CompVis diffusion models that output v.""" - - def __init__(self, model, quantize=False, device='cpu'): - super().__init__(model, model.alphas_cumprod, quantize=quantize) - - def get_v(self, x, t, cond, **kwargs): - return self.inner_model.apply_model(x, t, cond) diff --git a/comfy/samplers.py b/comfy/samplers.py index 518b666db..92ba5f8ec 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -1,5 +1,4 @@ from .k_diffusion import sampling as k_diffusion_sampling -from .k_diffusion import external as k_diffusion_external from .extra_samplers import uni_pc import torch import enum From 88410ace9bd249e4647f0332f8f2bb46ea0aa540 Mon Sep 17 00:00:00 2001 From: Joseph Antolick Date: Wed, 1 Nov 2023 16:52:51 -0400 Subject: [PATCH 049/170] fix: handle null case for currentNode widgets to prevent scroll error --- web/extensions/core/contextMenuFilter.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index 152cd7043..0a305391a 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -25,7 +25,7 @@ const ext = { requestAnimationFrame(() => { const currentNode = LGraphCanvas.active_canvas.current_node; const clickedComboValue = currentNode.widgets - .filter(w => w.type === "combo" && w.options.values.length === values.length) + ?.filter(w => w.type === "combo" && w.options.values.length === values.length) .find(w => w.options.values.every((v, i) => v === values[i])) ?.value; From ecb80abb58d53b2e88c03272645d3c059f86b931 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 19:13:03 -0400 Subject: [PATCH 050/170] Allow ModelSamplingDiscrete to be instantiated without a model config. 
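A minimal standalone sketch of what this change enables, assuming a ComfyUI checkout at this point in the series (the class still lives in comfy.model_base here):

from comfy.model_base import ModelSamplingDiscrete

ms = ModelSamplingDiscrete()          # no model_config required anymore; falls back to the "linear" beta schedule
print(ms.sigma_min, ms.sigma_max)     # endpoints of the discrete sigma schedule
print(ms.percent_to_sigma(0.5))       # the helper pre_run_control calls above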
--- comfy/model_base.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 37a52debf..41d464e52 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -32,9 +32,12 @@ class V_PREDICTION(EPS): class ModelSamplingDiscrete(torch.nn.Module): - def __init__(self, model_config): + def __init__(self, model_config=None): super().__init__() - self._register_schedule(given_betas=None, beta_schedule=model_config.beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + beta_schedule = "linear" + if model_config is not None: + beta_schedule = model_config.beta_schedule + self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) self.sigma_data = 1.0 def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, From 2455aaed8a50e7a9f89f70ce0eb84fe3f34fc971 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 20:27:20 -0400 Subject: [PATCH 051/170] Allow model or clip to be None in load_lora_for_models. --- comfy/sd.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 4a2823c9d..65a61343b 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -55,13 +55,26 @@ def load_clip_weights(model, sd): def load_lora_for_models(model, clip, lora, strength_model, strength_clip): - key_map = comfy.lora.model_lora_keys_unet(model.model) - key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + key_map = {} + if model is not None: + key_map = comfy.lora.model_lora_keys_unet(model.model, key_map) + if clip is not None: + key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + loaded = comfy.lora.load_lora(lora, key_map) - new_modelpatcher = model.clone() - k = new_modelpatcher.add_patches(loaded, strength_model) - new_clip = clip.clone() - k1 = new_clip.add_patches(loaded, strength_clip) + if model is not None: + new_modelpatcher = model.clone() + k = new_modelpatcher.add_patches(loaded, strength_model) + else: + k = () + new_modelpatcher = None + + if clip is not None: + new_clip = clip.clone() + k1 = new_clip.add_patches(loaded, strength_clip) + else: + k1 = () + new_clip = None k = set(k) k1 = set(k1) for x in loaded: From d2e27b48f169b0e5def6b9b2c7874e4010282921 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 20:49:37 -0400 Subject: [PATCH 052/170] sampler_cfg_function now gets the noisy output as argument again. This should make things that use sampler_cfg_function behave like before. Added an input argument for those that want the denoised output. This means you can calculate the x0 prediction of the model by doing: (input - cond) for example. 
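A hedged sketch of a custom CFG function under the restored convention; the dict keys match the ones built in the diff below, while `model` is an assumed, already loaded ModelPatcher whose model_options the sampler reads:

def my_cfg_function(args):
    cond = args["cond"]        # noisy cond output again (x minus the denoised cond)
    uncond = args["uncond"]    # noisy uncond output
    x = args["input"]          # the new argument: the noisy latent itself
    x0_pred = x - cond         # x0 prediction, computed here only to illustrate the commit message
    return uncond + (cond - uncond) * args["cond_scale"]   # plain CFG in noise space

model.model_options["sampler_cfg_function"] = my_cfg_function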
--- comfy/samplers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 92ba5f8ec..22a9b68ae 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -251,8 +251,8 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: - args = {"cond": cond, "uncond": uncond, "cond_scale": cond_scale, "timestep": timestep} - return model_options["sampler_cfg_function"](args) + args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x} + return x - model_options["sampler_cfg_function"](args) else: return uncond + (cond - uncond) * cond_scale From 6e84a01ecc31ea0ab5c83bf0698e4b5d4027955e Mon Sep 17 00:00:00 2001 From: Matteo Spinelli Date: Thu, 2 Nov 2023 17:29:57 +0100 Subject: [PATCH 053/170] Refactor the template manager (#1878) * add drag-drop to node template manager * better dnd, save field on change * actually save templates --------- Co-authored-by: matt3o --- web/extensions/core/nodeTemplates.js | 239 +++++++++++++++++---------- 1 file changed, 154 insertions(+), 85 deletions(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 434491075..b6479f454 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -14,6 +14,9 @@ import { ComfyDialog, $el } from "../../scripts/ui.js"; // To delete/rename: // Right click the canvas // Node templates -> Manage +// +// To rearrange: +// Open the manage dialog and Drag and drop elements using the "Name:" label as handle const id = "Comfy.NodeTemplates"; @@ -22,6 +25,10 @@ class ManageTemplates extends ComfyDialog { super(); this.element.classList.add("comfy-manage-templates"); this.templates = this.load(); + this.draggedEl = null; + this.saveVisualCue = null; + this.emptyImg = new Image(); + this.emptyImg.src = 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs='; this.importInput = $el("input", { type: "file", @@ -35,14 +42,11 @@ class ManageTemplates extends ComfyDialog { createButtons() { const btns = super.createButtons(); - btns[0].textContent = "Cancel"; - btns.unshift( - $el("button", { - type: "button", - textContent: "Save", - onclick: () => this.save(), - }) - ); + btns[0].textContent = "Close"; + btns[0].onclick = (e) => { + clearTimeout(this.saveVisualCue); + this.close(); + }; btns.unshift( $el("button", { type: "button", @@ -71,25 +75,6 @@ class ManageTemplates extends ComfyDialog { } } - save() { - // Find all visible inputs and save them as our new list - const inputs = this.element.querySelectorAll("input"); - const updated = []; - - for (let i = 0; i < inputs.length; i++) { - const input = inputs[i]; - if (input.parentElement.style.display !== "none") { - const t = this.templates[i]; - t.name = input.value.trim() || input.getAttribute("data-name"); - updated.push(t); - } - } - - this.templates = updated; - this.store(); - this.close(); - } - store() { localStorage.setItem(id, JSON.stringify(this.templates)); } @@ -145,71 +130,155 @@ class ManageTemplates extends ComfyDialog { super.show( $el( "div", - { - style: { - display: "grid", - gridTemplateColumns: "1fr auto", - gap: "5px", - }, - }, - this.templates.flatMap((t) => { + {}, + this.templates.flatMap((t,i) => { let nameInput; return [ $el( - "label", + "div", { - textContent: "Name: ", + dataset: 
{ id: i }, + className: "tempateManagerRow", + style: { + display: "grid", + gridTemplateColumns: "1fr auto", + border: "1px dashed transparent", + gap: "5px", + backgroundColor: "var(--comfy-menu-bg)" + }, + ondragstart: (e) => { + this.draggedEl = e.currentTarget; + e.currentTarget.style.opacity = "0.6"; + e.currentTarget.style.border = "1px dashed yellow"; + e.dataTransfer.effectAllowed = 'move'; + e.dataTransfer.setDragImage(this.emptyImg, 0, 0); + }, + ondragend: (e) => { + e.target.style.opacity = "1"; + e.currentTarget.style.border = "1px dashed transparent"; + e.currentTarget.removeAttribute("draggable"); + + // rearrange the elements in the localStorage + this.element.querySelectorAll('.tempateManagerRow').forEach((el,i) => { + var prev_i = el.dataset.id; + + if ( el == this.draggedEl && prev_i != i ) { + [this.templates[i], this.templates[prev_i]] = [this.templates[prev_i], this.templates[i]]; + } + el.dataset.id = i; + }); + this.store(); + }, + ondragover: (e) => { + e.preventDefault(); + if ( e.currentTarget == this.draggedEl ) + return; + + let rect = e.currentTarget.getBoundingClientRect(); + if (e.clientY > rect.top + rect.height / 2) { + e.currentTarget.parentNode.insertBefore(this.draggedEl, e.currentTarget.nextSibling); + } else { + e.currentTarget.parentNode.insertBefore(this.draggedEl, e.currentTarget); + } + } }, [ - $el("input", { - value: t.name, - dataset: { name: t.name }, - $: (el) => (nameInput = el), - }), + $el( + "label", + { + textContent: "Name: ", + style: { + cursor: "grab", + }, + onmousedown: (e) => { + // enable dragging only from the label + if (e.target.localName == 'label') + e.currentTarget.parentNode.draggable = 'true'; + } + }, + [ + $el("input", { + value: t.name, + dataset: { name: t.name }, + style: { + transitionProperty: 'background-color', + transitionDuration: '0s', + }, + onchange: (e) => { + clearTimeout(this.saveVisualCue); + var el = e.target; + var row = el.parentNode.parentNode; + this.templates[row.dataset.id].name = el.value.trim() || 'untitled'; + this.store(); + el.style.backgroundColor = 'rgb(40, 95, 40)'; + el.style.transitionDuration = '0s'; + this.saveVisualCue = setTimeout(function () { + el.style.transitionDuration = '.7s'; + el.style.backgroundColor = 'var(--comfy-input-bg)'; + }, 15); + }, + onkeypress: (e) => { + var el = e.target; + clearTimeout(this.saveVisualCue); + el.style.transitionDuration = '0s'; + el.style.backgroundColor = 'var(--comfy-input-bg)'; + }, + $: (el) => (nameInput = el), + }) + ] + ), + $el( + "div", + {}, + [ + $el("button", { + textContent: "Export", + style: { + fontSize: "12px", + fontWeight: "normal", + }, + onclick: (e) => { + const json = JSON.stringify({templates: [t]}, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: (nameInput.value || t.name) + ".json", + style: {display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + }, + }), + $el("button", { + textContent: "Delete", + style: { + fontSize: "12px", + color: "red", + fontWeight: "normal", + }, + onclick: (e) => { + const item = e.target.parentNode.parentNode; + item.parentNode.removeChild(item); + this.templates.splice(item.dataset.id*1, 1); + this.store(); + // update the rows index, setTimeout ensures that the list is updated + var that = this; + setTimeout(function (){ + 
that.element.querySelectorAll('.tempateManagerRow').forEach((el,i) => { + el.dataset.id = i; + }); + }, 0); + }, + }), + ] + ), ] - ), - $el( - "div", - {}, - [ - $el("button", { - textContent: "Export", - style: { - fontSize: "12px", - fontWeight: "normal", - }, - onclick: (e) => { - const json = JSON.stringify({templates: [t]}, null, 2); // convert the data to a JSON string - const blob = new Blob([json], {type: "application/json"}); - const url = URL.createObjectURL(blob); - const a = $el("a", { - href: url, - download: (nameInput.value || t.name) + ".json", - style: {display: "none"}, - parent: document.body, - }); - a.click(); - setTimeout(function () { - a.remove(); - window.URL.revokeObjectURL(url); - }, 0); - }, - }), - $el("button", { - textContent: "Delete", - style: { - fontSize: "12px", - color: "red", - fontWeight: "normal", - }, - onclick: (e) => { - nameInput.value = ""; - e.target.parentElement.style.display = "none"; - e.target.parentElement.previousElementSibling.style.display = "none"; - }, - }), - ] - ), + ) ]; }) ) From ee74ef5c9ed9e9c8ecb967e6ce58ec74f664fd0c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 2 Nov 2023 13:07:41 -0400 Subject: [PATCH 054/170] Increase maximum batch size in LatentRebatch. --- comfy_extras/nodes_rebatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_rebatch.py b/comfy_extras/nodes_rebatch.py index 0a9daf272..88a4ebe29 100644 --- a/comfy_extras/nodes_rebatch.py +++ b/comfy_extras/nodes_rebatch.py @@ -4,7 +4,7 @@ class LatentRebatch: @classmethod def INPUT_TYPES(s): return {"required": { "latents": ("LATENT",), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), }} RETURN_TYPES = ("LATENT",) INPUT_IS_LIST = True From ae2acfc21b984ee780e0e1329a3c7b7189903501 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 3 Nov 2023 13:11:16 -0400 Subject: [PATCH 055/170] Don't convert Nan to zero. Converting Nan to zero is a bad idea because it makes it hard to tell when something went wrong. 
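An illustrative, standalone sketch of why the epsilon-seeded counters make the blanket nan_to_num unnecessary: areas no cond covered now divide as 0 / 1e-37 == 0.0 instead of hitting the 0 / 0 NaN case, while genuine NaNs produced by the model still propagate loudly instead of being silently rewritten to zero:

import torch

out_cond = torch.zeros(4)
out_count = torch.ones(4) * 1e-37
out_cond[:2] += 1.0    # pretend a cond area only covered the first half
out_count[:2] += 1.0
print(out_cond / out_count)   # tensor([1., 1., 0., 0.]), no NaNs and no masking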
--- comfy/samplers.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 22a9b68ae..964febb26 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -136,10 +136,10 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) - out_count = torch.zeros_like(x_in) + out_count = torch.ones_like(x_in) * 1e-37 out_uncond = torch.zeros_like(x_in) - out_uncond_count = torch.zeros_like(x_in) + out_uncond_count = torch.ones_like(x_in) * 1e-37 COND = 0 UNCOND = 1 @@ -239,9 +239,6 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod del out_count out_uncond /= out_uncond_count del out_uncond_count - - torch.nan_to_num(out_cond, nan=0.0, posinf=0.0, neginf=0.0, out=out_cond) #in case out_count or out_uncond_count had some zeros - torch.nan_to_num(out_uncond, nan=0.0, posinf=0.0, neginf=0.0, out=out_uncond) return out_cond, out_uncond From 1ffa8858e7e50cbe84180e0c455621e7db0fe7c0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 4 Nov 2023 01:32:23 -0400 Subject: [PATCH 056/170] Move model sampling code to comfy/model_sampling.py --- comfy/model_base.py | 77 +--------------------------------------- comfy/model_sampling.py | 78 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 76 deletions(-) create mode 100644 comfy/model_sampling.py diff --git a/comfy/model_base.py b/comfy/model_base.py index 41d464e52..d1a95daad 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1,11 +1,9 @@ import torch from comfy.ldm.modules.diffusionmodules.openaimodel import UNetModel from comfy.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation -from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule from comfy.ldm.modules.diffusionmodules.openaimodel import Timestep import comfy.model_management import comfy.conds -import numpy as np from enum import Enum from . 
import utils @@ -14,79 +12,7 @@ class ModelType(Enum): V_PREDICTION = 2 -#NOTE: all this sampling stuff will be moved -class EPS: - def calculate_input(self, sigma, noise): - sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) - return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - - def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) - return model_input - model_output * sigma - - -class V_PREDICTION(EPS): - def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) - return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - - -class ModelSamplingDiscrete(torch.nn.Module): - def __init__(self, model_config=None): - super().__init__() - beta_schedule = "linear" - if model_config is not None: - beta_schedule = model_config.beta_schedule - self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) - self.sigma_data = 1.0 - - def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if given_betas is not None: - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - alphas = 1. - betas - alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32) - # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - - # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) - # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) - # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - - sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 - - self.register_buffer('sigmas', sigmas) - self.register_buffer('log_sigmas', sigmas.log()) - - @property - def sigma_min(self): - return self.sigmas[0] - - @property - def sigma_max(self): - return self.sigmas[-1] - - def timestep(self, sigma): - log_sigma = sigma.log() - dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) - - def sigma(self, timestep): - t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) - low_idx = t.floor().long() - high_idx = t.ceil().long() - w = t.frac() - log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() - - def percent_to_sigma(self, percent): - return self.sigma(torch.tensor(percent * 999.0)) +from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete def model_sampling(model_config, model_type): if model_type == ModelType.EPS: @@ -102,7 +28,6 @@ def model_sampling(model_config, model_type): return ModelSampling(model_config) - class BaseModel(torch.nn.Module): def __init__(self, model_config, model_type=ModelType.EPS, device=None): super().__init__() diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py new file mode 100644 index 000000000..5e2293238 --- /dev/null +++ b/comfy/model_sampling.py @@ -0,0 +1,78 @@ +import torch +import numpy as np +from 
comfy.ldm.modules.diffusionmodules.util import make_beta_schedule + + +class EPS: + def calculate_input(self, sigma, noise): + sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input - model_output * sigma + + +class V_PREDICTION(EPS): + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + +class ModelSamplingDiscrete(torch.nn.Module): + def __init__(self, model_config=None): + super().__init__() + beta_schedule = "linear" + if model_config is not None: + beta_schedule = model_config.beta_schedule + self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + self.sigma_data = 1.0 + + def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if given_betas is not None: + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + alphas = 1. - betas + alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32) + # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + + # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) + + sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 + + self.register_buffer('sigmas', sigmas) + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + log_sigma = sigma.log() + dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape) + + def sigma(self, timestep): + t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) + low_idx = t.floor().long() + high_idx = t.ceil().long() + w = t.frac() + log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] + return log_sigma.exp() + + def percent_to_sigma(self, percent): + return self.sigma(torch.tensor(percent * 999.0)) + From 7e455adc071974d178fbdd7dde616f48787f6c51 Mon Sep 17 00:00:00 2001 From: gameltb Date: Sun, 5 Nov 2023 17:11:44 +0800 Subject: [PATCH 057/170] fix unet_wrapper_function name in ModelPatcher --- comfy/model_patcher.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 50b725b86..0efdf46e8 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -107,10 +107,10 @@ class ModelPatcher: for k in patch_list: if hasattr(patch_list[k], "to"): patch_list[k] = patch_list[k].to(device) - if "unet_wrapper_function" in self.model_options: - wrap_func = 
self.model_options["unet_wrapper_function"] + if "model_function_wrapper" in self.model_options: + wrap_func = self.model_options["model_function_wrapper"] if hasattr(wrap_func, "to"): - self.model_options["unet_wrapper_function"] = wrap_func.to(device) + self.model_options["model_function_wrapper"] = wrap_func.to(device) def model_dtype(self): if hasattr(self.model, "get_dtype"): From 02f062b5b7d8013e8d58a9c7e244aa8637b8062c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 5 Nov 2023 12:29:28 -0500 Subject: [PATCH 058/170] Sanitize unknown node types on load to prevent XSS. --- web/scripts/app.js | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index 583310a27..638afd56c 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -5,6 +5,22 @@ import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; import { getPngMetadata, getWebpMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; + +function sanitizeNodeName(string) { + let entityMap = { + '&': '', + '<': '', + '>': '', + '"': '', + "'": '', + '`': '', + '=': '' + }; + return String(string).replace(/[&<>"'`=\/]/g, function fromEntityMap (s) { + return entityMap[s]; + }); +} + /** * @typedef {import("types/comfy").ComfyExtension} ComfyExtension */ @@ -1480,6 +1496,7 @@ export class ComfyApp { // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { + n.type = sanitizeNodeName(n.type); missingNodeTypes.push(n.type); } } From 4acfc11a802fad4e90103f9fd3cf73cb0c9b5ae1 Mon Sep 17 00:00:00 2001 From: matt3o Date: Sun, 5 Nov 2023 19:00:23 +0100 Subject: [PATCH 059/170] add difference blend mode --- comfy_extras/nodes_post_processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 324cfe105..12704f545 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -23,7 +23,7 @@ class Blend: "max": 1.0, "step": 0.01 }), - "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],), + "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light", "difference"],), }, } @@ -54,6 +54,8 @@ class Blend: return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2)) elif mode == "soft_light": return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1)) + elif mode == "difference": + return img1 - img2 else: raise ValueError(f"Unsupported blend mode: {mode}") From b3fcd64c6c9c57a8a83ceeff3e6eb7121b122f08 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 6 Nov 2023 01:09:18 -0500 Subject: [PATCH 060/170] Make SDTokenizer class work with more types of tokenizers. 
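A hypothetical subclass showing how the new constructor knobs compose. T5TokenizerFast, the tokenizer directory, and the embedding size are illustrative stand-ins, not something this patch ships:

from transformers import T5TokenizerFast
from comfy.sd1_clip import SDTokenizer

class T5XXLTokenizer(SDTokenizer):
    def __init__(self, embedding_directory=None):
        # T5 has no BOS token, and CLIP-style padding out to max_length
        # does not apply, hence the two new flags.
        super().__init__("some_t5_tokenizer_dir", pad_with_end=False,
                         embedding_directory=embedding_directory,
                         embedding_size=4096, embedding_key='t5xxl',
                         tokenizer_class=T5TokenizerFast,
                         has_start_token=False, pad_to_max_length=False)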
--- comfy/sd1_clip.py | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index fdaa1e6c7..4761230a6 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -343,17 +343,24 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No return embed_out class SDTokenizer: - def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l'): + def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True): if tokenizer_path is None: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") - self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path) + self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path) self.max_length = max_length - self.max_tokens_per_section = self.max_length - 2 empty = self.tokenizer('')["input_ids"] - self.start_token = empty[0] - self.end_token = empty[1] + if has_start_token: + self.tokens_start = 1 + self.start_token = empty[0] + self.end_token = empty[1] + else: + self.tokens_start = 0 + self.start_token = None + self.end_token = empty[0] self.pad_with_end = pad_with_end + self.pad_to_max_length = pad_to_max_length + vocab = self.tokenizer.get_vocab() self.inv_vocab = {v: k for k, v in vocab.items()} self.embedding_directory = embedding_directory @@ -414,11 +421,13 @@ class SDTokenizer: else: continue #parse word - tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]]) + tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]]) #reshape token array to CLIP input size batched_tokens = [] - batch = [(self.start_token, 1.0, 0)] + batch = [] + if self.start_token is not None: + batch.append((self.start_token, 1.0, 0)) batched_tokens.append(batch) for i, t_group in enumerate(tokens): #determine if we're going to try and keep the tokens in a single batch @@ -435,16 +444,21 @@ class SDTokenizer: #add end token and pad else: batch.append((self.end_token, 1.0, 0)) - batch.extend([(pad_token, 1.0, 0)] * (remaining_length)) + if self.pad_to_max_length: + batch.extend([(pad_token, 1.0, 0)] * (remaining_length)) #start new batch - batch = [(self.start_token, 1.0, 0)] + batch = [] + if self.start_token is not None: + batch.append((self.start_token, 1.0, 0)) batched_tokens.append(batch) else: batch.extend([(t,w,i+1) for t,w in t_group]) t_group = [] #fill last batch - batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1)) + batch.append((self.end_token, 1.0, 0)) + if self.pad_to_max_length: + batch.extend([(pad_token, 1.0, 0)] * (self.max_length - len(batch))) if not return_word_ids: batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens] From 656c0b5d90239efb8be4281d2c16d52ca722064c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 6 Nov 2023 13:43:50 -0500 Subject: [PATCH 061/170] CLIP code refactor and improvements. More generic clip model class that can be used on more types of text encoders. Don't apply weighting algorithm when weight is 1.0 Don't compute an empty token output when it's not needed. 
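The weighting rule itself, isolated with illustrative shapes: each token embedding is interpolated away from the empty-prompt embedding, so a weight of exactly 1.0 is a no-op, which is what lets the refactor skip both the weighting loop and the extra empty-prompt encode:

import torch

z = torch.randn(77, 768)         # encoded prompt tokens (illustrative shape)
z_empty = torch.randn(77, 768)   # encoding of an empty prompt
weighted = (z - z_empty) * 1.2 + z_empty                  # emphasis above 1.0
assert torch.allclose((z - z_empty) * 1.0 + z_empty, z)   # weight 1.0 changes nothing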
--- comfy/sd1_clip.py | 84 ++++++++++++++++++++++++++++++++-------------- comfy/sd2_clip.py | 3 +- comfy/sdxl_clip.py | 8 ++--- 3 files changed, 62 insertions(+), 33 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 4761230a6..7db7ee0f4 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -8,32 +8,54 @@ import zipfile from . import model_management import contextlib +def gen_empty_tokens(special_tokens, length): + start_token = special_tokens.get("start", None) + end_token = special_tokens.get("end", None) + pad_token = special_tokens.get("pad") + output = [] + if start_token is not None: + output.append(start_token) + if end_token is not None: + output.append(end_token) + output += [pad_token] * (length - len(output)) + return output + class ClipTokenWeightEncoder: def encode_token_weights(self, token_weight_pairs): - to_encode = list(self.empty_tokens) + to_encode = list() + max_token_len = 0 + has_weights = False for x in token_weight_pairs: tokens = list(map(lambda a: a[0], x)) + max_token_len = max(len(tokens), max_token_len) + has_weights = has_weights or not all(map(lambda a: a[1] == 1.0, x)) to_encode.append(tokens) + sections = len(to_encode) + if has_weights or sections == 0: + to_encode.append(gen_empty_tokens(self.special_tokens, max_token_len)) + out, pooled = self.encode(to_encode) - z_empty = out[0:1] - if pooled.shape[0] > 1: - first_pooled = pooled[1:2] + if pooled is not None: + first_pooled = pooled[0:1].cpu() else: - first_pooled = pooled[0:1] + first_pooled = pooled output = [] - for k in range(1, out.shape[0]): + for k in range(0, sections): z = out[k:k+1] - for i in range(len(z)): - for j in range(len(z[i])): - weight = token_weight_pairs[k - 1][j][1] - z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j] + if has_weights: + z_empty = out[-1] + for i in range(len(z)): + for j in range(len(z[i])): + weight = token_weight_pairs[k][j][1] + if weight != 1.0: + z[i][j] = (z[i][j] - z_empty[j]) * weight + z_empty[j] output.append(z) if (len(output) == 0): - return z_empty.cpu(), first_pooled.cpu() - return torch.cat(output, dim=-2).cpu(), first_pooled.cpu() + return out[-1:].cpu(), first_pooled + return torch.cat(output, dim=-2).cpu(), first_pooled class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): """Uses the CLIP transformer encoder for text (from huggingface)""" @@ -43,37 +65,43 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): "hidden" ] def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77, - freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None): # clip-vit-base-patch32 + freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None, + special_tokens={"start": 49406, "end": 49407, "pad": 49407},layer_norm_hidden_state=True, config_class=CLIPTextConfig, + model_class=CLIPTextModel, inner_name="text_model"): # clip-vit-base-patch32 super().__init__() assert layer in self.LAYERS self.num_layers = 12 if textmodel_path is not None: - self.transformer = CLIPTextModel.from_pretrained(textmodel_path) + self.transformer = model_class.from_pretrained(textmodel_path) else: if textmodel_json_config is None: textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json") - config = CLIPTextConfig.from_json_file(textmodel_json_config) + config = config_class.from_json_file(textmodel_json_config) self.num_layers = config.num_hidden_layers with 
comfy.ops.use_comfy_ops(device, dtype): with modeling_utils.no_init_weights(): - self.transformer = CLIPTextModel(config) + self.transformer = model_class(config) + self.inner_name = inner_name if dtype is not None: self.transformer.to(dtype) - self.transformer.text_model.embeddings.token_embedding.to(torch.float32) - self.transformer.text_model.embeddings.position_embedding.to(torch.float32) + inner_model = getattr(self.transformer, self.inner_name) + if hasattr(inner_model, "embeddings"): + inner_model.embeddings.to(torch.float32) + else: + self.transformer.set_input_embeddings(self.transformer.get_input_embeddings().to(torch.float32)) self.max_length = max_length if freeze: self.freeze() self.layer = layer self.layer_idx = None - self.empty_tokens = [[49406] + [49407] * 76] + self.special_tokens = special_tokens self.text_projection = torch.nn.Parameter(torch.eye(self.transformer.get_input_embeddings().weight.shape[1])) self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055)) self.enable_attention_masks = False - self.layer_norm_hidden_state = True + self.layer_norm_hidden_state = layer_norm_hidden_state if layer == "hidden": assert layer_idx is not None assert abs(layer_idx) <= self.num_layers @@ -117,7 +145,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): else: print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1]) while len(tokens_temp) < len(x): - tokens_temp += [self.empty_tokens[0][-1]] + tokens_temp += [self.special_tokens["pad"]] out_tokens += [tokens_temp] n = token_dict_size @@ -142,7 +170,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): tokens = self.set_up_textual_embeddings(tokens, backup_embeds) tokens = torch.LongTensor(tokens).to(device) - if self.transformer.text_model.final_layer_norm.weight.dtype != torch.float32: + if getattr(self.transformer, self.inner_name).final_layer_norm.weight.dtype != torch.float32: precision_scope = torch.autocast else: precision_scope = lambda a, b: contextlib.nullcontext(a) @@ -168,12 +196,16 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): else: z = outputs.hidden_states[self.layer_idx] if self.layer_norm_hidden_state: - z = self.transformer.text_model.final_layer_norm(z) + z = getattr(self.transformer, self.inner_name).final_layer_norm(z) - pooled_output = outputs.pooler_output - if self.text_projection is not None: + if hasattr(outputs, "pooler_output"): + pooled_output = outputs.pooler_output.float() + else: + pooled_output = None + + if self.text_projection is not None and pooled_output is not None: pooled_output = pooled_output.float().to(self.text_projection.device) @ self.text_projection.float() - return z.float(), pooled_output.float() + return z.float(), pooled_output def encode(self, tokens): return self(tokens) diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index ebabf7ccd..2ee0ca055 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -9,8 +9,7 @@ class SD2ClipHModel(sd1_clip.SDClipModel): layer_idx=23 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) - self.empty_tokens = [[49406] + [49407] + [0] * 75] + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, 
dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}) class SD2ClipHTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None): diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index 4c508a0ea..673399e22 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -9,9 +9,8 @@ class SDXLClipG(sd1_clip.SDClipModel): layer_idx=-2 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) - self.empty_tokens = [[49406] + [49407] + [0] * 75] - self.layer_norm_hidden_state = False + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype, + special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False) def load_sd(self, sd): return super().load_sd(sd) @@ -38,8 +37,7 @@ class SDXLTokenizer: class SDXLClipModel(torch.nn.Module): def __init__(self, device="cpu", dtype=None): super().__init__() - self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype) - self.clip_l.layer_norm_hidden_state = False + self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype, layer_norm_hidden_state=False) self.clip_g = SDXLClipG(device=device, dtype=dtype) def clip_layer(self, layer_idx): From 844dbf97a71b398301e1a6318c6776bc5b1f5b7e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 03:28:53 -0500 Subject: [PATCH 062/170] Add: advanced->model->ModelSamplingDiscrete node. This allows changing the sampling parameters of the model (eps or vpred) or set the model to use zsnr. 
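A sketch of driving the node the diff below adds, with `model` an assumed, already loaded ModelPatcher. Internally the node clones the patcher, builds a sampling object of the requested prediction type, optionally rescales its sigmas for zero terminal SNR, and swaps it in through the new add_object_patch mechanism:

from comfy_extras.nodes_model_advanced import ModelSamplingDiscrete  # the node class, not the one in comfy.model_sampling

node = ModelSamplingDiscrete()
(patched_model,) = node.patch(model, sampling="v_prediction", zsnr=True)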
--- comfy/model_patcher.py | 17 +++++++++ comfy/model_sampling.py | 2 + comfy_extras/nodes_model_advanced.py | 57 ++++++++++++++++++++++++++++ nodes.py | 1 + 4 files changed, 77 insertions(+) create mode 100644 comfy_extras/nodes_model_advanced.py diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 0efdf46e8..0f5385597 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -11,6 +11,8 @@ class ModelPatcher: self.model = model self.patches = {} self.backup = {} + self.object_patches = {} + self.object_patches_backup = {} self.model_options = {"transformer_options":{}} self.model_size() self.load_device = load_device @@ -91,6 +93,9 @@ class ModelPatcher: def set_model_output_block_patch(self, patch): self.set_model_patch(patch, "output_block_patch") + def add_object_patch(self, name, obj): + self.object_patches[name] = obj + def model_patches_to(self, device): to = self.model_options["transformer_options"] if "patches" in to: @@ -150,6 +155,12 @@ class ModelPatcher: return sd def patch_model(self, device_to=None): + for k in self.object_patches: + old = getattr(self.model, k) + if k not in self.object_patches_backup: + self.object_patches_backup[k] = old + setattr(self.model, k, self.object_patches[k]) + model_sd = self.model_state_dict() for key in self.patches: if key not in model_sd: @@ -290,3 +301,9 @@ class ModelPatcher: if device_to is not None: self.model.to(device_to) self.current_device = device_to + + keys = list(self.object_patches_backup.keys()) + for k in keys: + setattr(self.model, k, self.object_patches_backup[k]) + + self.object_patches_backup = {} diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index 5e2293238..a2935d47d 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -48,7 +48,9 @@ class ModelSamplingDiscrete(torch.nn.Module): # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 + self.set_sigmas(sigmas) + def set_sigmas(self, sigmas): self.register_buffer('sigmas', sigmas) self.register_buffer('log_sigmas', sigmas.log()) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py new file mode 100644 index 000000000..c02cfb05a --- /dev/null +++ b/comfy_extras/nodes_model_advanced.py @@ -0,0 +1,57 @@ +import folder_paths +import comfy.sd +import comfy.model_sampling + + +def rescale_zero_terminal_snr_sigmas(sigmas): + alphas_cumprod = 1 / ((sigmas * sigmas) + 1) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. 
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return ((1 - alphas_bar) / alphas_bar) ** 0.5 + +class ModelSamplingDiscrete: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "sampling": (["eps", "v_prediction"],), + "zsnr": ("BOOLEAN", {"default": False}), + }} + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "advanced/model" + + def patch(self, model, sampling, zsnr): + m = model.clone() + + if sampling == "eps": + sampling_type = comfy.model_sampling.EPS + elif sampling == "v_prediction": + sampling_type = comfy.model_sampling.V_PREDICTION + + class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingDiscrete, sampling_type): + pass + + model_sampling = ModelSamplingAdvanced() + if zsnr: + model_sampling.set_sigmas(rescale_zero_terminal_snr_sigmas(model_sampling.sigmas)) + m.add_object_patch("model_sampling", model_sampling) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "ModelSamplingDiscrete": ModelSamplingDiscrete, +} diff --git a/nodes.py b/nodes.py index 61ebbb8b4..5ed015442 100644 --- a/nodes.py +++ b/nodes.py @@ -1798,6 +1798,7 @@ def init_custom_nodes(): "nodes_freelunch.py", "nodes_custom_sampler.py", "nodes_hypertile.py", + "nodes_model_advanced.py", ] for node_file in extras_files: From 2a23ba0b8c225b59902423ef08db0de39d2ed7e7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 04:30:37 -0500 Subject: [PATCH 063/170] Fix unet ops not entirely on GPU. --- comfy/ldm/modules/diffusionmodules/util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index d890c8044..0298ca99d 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -170,8 +170,8 @@ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): if not repeat_only: half = dim // 2 freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half + ) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: From a527d0c795ba5572708095fcf0f9366e2076ba7e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 19:33:40 -0500 Subject: [PATCH 064/170] Code refactor. 
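On the PATCH 063 fix above, a standalone sketch of the pattern (the function name is illustrative): allocating the arange directly on the target device replaces a CPU allocation plus a host-to-device copy on every UNet call:

import math
import torch

def make_freqs(timesteps, half, max_period=10000):
    # built on timesteps.device from the start; no trailing .to(...) transfer
    return torch.exp(
        -math.log(max_period)
        * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device)
        / half
    )

print(make_freqs(torch.tensor([10]), 4))   # 4 frequencies on the same device as the input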
--- .../modules/diffusionmodules/openaimodel.py | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 7dfdfc0a2..6c2113e3e 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -251,6 +251,12 @@ class Timestep(nn.Module): def forward(self, t): return timestep_embedding(t, self.dim) +def apply_control(h, control, name): + if control is not None and name in control and len(control[name]) > 0: + ctrl = control[name].pop() + if ctrl is not None: + h += ctrl + return h class UNetModel(nn.Module): """ @@ -617,25 +623,17 @@ class UNetModel(nn.Module): for id, module in enumerate(self.input_blocks): transformer_options["block"] = ("input", id) h = forward_timestep_embed(module, h, emb, context, transformer_options) - if control is not None and 'input' in control and len(control['input']) > 0: - ctrl = control['input'].pop() - if ctrl is not None: - h += ctrl + h = apply_control(h, control, 'input') hs.append(h) + transformer_options["block"] = ("middle", 0) h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) - if control is not None and 'middle' in control and len(control['middle']) > 0: - ctrl = control['middle'].pop() - if ctrl is not None: - h += ctrl + h = apply_control(h, control, 'middle') for id, module in enumerate(self.output_blocks): transformer_options["block"] = ("output", id) hsp = hs.pop() - if control is not None and 'output' in control and len(control['output']) > 0: - ctrl = control['output'].pop() - if ctrl is not None: - hsp += ctrl + h = apply_control(h, control, 'output') if "output_block_patch" in transformer_patches: patch = transformer_patches["output_block_patch"] From fe40109b57bc3cf3c79c98f34c06041bf917f22f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 22:15:15 -0500 Subject: [PATCH 065/170] Fix issue with object patches not being copied with patcher. --- comfy/model_patcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 0f5385597..55800e86e 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -40,6 +40,7 @@ class ModelPatcher: for k in self.patches: n.patches[k] = self.patches[k][:] + n.object_patches = self.object_patches.copy() n.model_options = copy.deepcopy(self.model_options) n.model_keys = self.model_keys return n From 0a6fd49a3ef730741fc5f43ca89f3fadd3401129 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 22:15:55 -0500 Subject: [PATCH 066/170] Print leftover keys when using the UNETLoader. --- comfy/sd.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index 65a61343b..65d94f46e 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -496,6 +496,9 @@ def load_unet(unet_path): #load unet in diffusers format model = model_config.get_model(new_sd, "") model = model.to(offload_device) model.load_model_weights(new_sd, "") + left_over = sd.keys() + if len(left_over) > 0: + print("left over keys in unet:", left_over) return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device) def save_checkpoint(output_path, model, clip, vae, metadata=None): From 794dd2064d82988fd63250f3e79b226cfdbc4e93 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 23:41:55 -0500 Subject: [PATCH 067/170] Fix typo. 
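Context for the one-line fix below: in the output blocks the ControlNet residual belongs on the skip connection popped from hs, not on the running hidden state h, so the refactored call has to target hsp. A toy-tensor sketch of the corrected flow, reusing apply_control from the refactor above:

import torch as th

def apply_control(h, control, name):
    if control is not None and name in control and len(control[name]) > 0:
        ctrl = control[name].pop()
        if ctrl is not None:
            h += ctrl
    return h

h = th.zeros(1, 4, 8, 8)
hs = [th.zeros(1, 4, 8, 8)]                  # stashed skip connections
control = {'output': [th.ones(1, 4, 8, 8)]}  # one queued residual

hsp = hs.pop()
hsp = apply_control(hsp, control, 'output')  # residual lands on the skip tensor
h = th.cat([h, hsp], dim=1)                  # how the UNet consumes hsp next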
--- comfy/ldm/modules/diffusionmodules/openaimodel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 6c2113e3e..49c1e8cbb 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -633,7 +633,7 @@ class UNetModel(nn.Module): for id, module in enumerate(self.output_blocks): transformer_options["block"] = ("output", id) hsp = hs.pop() - h = apply_control(h, control, 'output') + hsp = apply_control(hsp, control, 'output') if "output_block_patch" in transformer_patches: patch = transformer_patches["output_block_patch"] From 064d7583ebc0d6f9c0c4d28da76717d99230a64d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 8 Nov 2023 01:59:09 -0500 Subject: [PATCH 068/170] Add a CONDConstant for passing non tensor conds to unet. --- comfy/conds.py | 15 +++++++++++++++ comfy/model_base.py | 5 ++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/comfy/conds.py b/comfy/conds.py index 1e3111baf..6cff25184 100644 --- a/comfy/conds.py +++ b/comfy/conds.py @@ -62,3 +62,18 @@ class CONDCrossAttn(CONDRegular): c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result out.append(c) return torch.cat(out) + +class CONDConstant(CONDRegular): + def __init__(self, cond): + self.cond = cond + + def process_cond(self, batch_size, device, **kwargs): + return self._copy_with(self.cond) + + def can_concat(self, other): + if self.cond != other.cond: + return False + return True + + def concat(self, others): + return self.cond diff --git a/comfy/model_base.py b/comfy/model_base.py index d1a95daad..7ba253470 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -61,7 +61,10 @@ class BaseModel(torch.nn.Module): context = context.to(dtype) extra_conds = {} for o in kwargs: - extra_conds[o] = kwargs[o].to(dtype) + extra = kwargs[o] + if hasattr(extra, "to"): + extra = extra.to(dtype) + extra_conds[o] = extra model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() return self.model_sampling.calculate_denoised(sigma, model_output, x) From ec120001363271ca039c8e07dabd8837df6498cd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 8 Nov 2023 22:05:31 -0500 Subject: [PATCH 069/170] Add support for full diff lora keys. --- comfy/lora.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index d4cf94c95..29c59d893 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -131,6 +131,18 @@ def load_lora(lora, to_load): loaded_keys.add(b_norm_name) patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (b_norm,) + diff_name = "{}.diff".format(x) + diff_weight = lora.get(diff_name, None) + if diff_weight is not None: + patch_dict[to_load[x]] = (diff_weight,) + loaded_keys.add(diff_name) + + diff_bias_name = "{}.diff_b".format(x) + diff_bias = lora.get(diff_bias_name, None) + if diff_bias is not None: + patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (diff_bias,) + loaded_keys.add(diff_bias_name) + for x in lora.keys(): if x not in loaded_keys: print("lora key not loaded", x) From cd6df8b323d4d7d32730f4460f76795dd9b8ca60 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 9 Nov 2023 13:10:19 -0500 Subject: [PATCH 070/170] Fix sanitize node name removing the "/" character. 
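A sketch of the "full diff" key format the loader now recognizes, with hypothetical key names and zero tensors standing in for real deltas; a single-tensor patch of this shape is applied as an additive weight difference scaled by the LoRA strength:

import torch

lora = {
    "unet.some_layer.diff": torch.zeros(320, 320),   # full weight delta
    "unet.some_layer.diff_b": torch.zeros(320),      # matching bias delta
}
to_load = {"unet.some_layer": "diffusion_model.some_layer.weight"}

# mirrors the loader logic in the diff above
patch_dict = {}
x = "unet.some_layer"
patch_dict[to_load[x]] = (lora[x + ".diff"],)
patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (lora[x + ".diff_b"],)
print(sorted(patch_dict))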
--- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 638afd56c..50e205222 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -16,7 +16,7 @@ function sanitizeNodeName(string) { '`': '', '=': '' }; - return String(string).replace(/[&<>"'`=\/]/g, function fromEntityMap (s) { + return String(string).replace(/[&<>"'`=]/g, function fromEntityMap (s) { return entityMap[s]; }); } From 72e3feb5735adc7b968c2bc8d0b5cd5e9bea9c59 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 9 Nov 2023 18:33:43 +0000 Subject: [PATCH 071/170] Load API JSON (#1932) * added loading api json * revert async change * reorder --- web/scripts/app.js | 69 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 60 insertions(+), 9 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 50e205222..61b88d44b 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1469,6 +1469,17 @@ export class ComfyApp { localStorage.setItem("litegrapheditor_clipboard", old); } + showMissingNodesError(missingNodeTypes, hasAddedNodes = true) { + this.ui.dialog.show( + `When loading the graph, the following node types were not found:
    <ul>${Array.from(new Set(missingNodeTypes)).map( + (t) => `<li>${t}</li>` + ).join("")}</ul>
${hasAddedNodes ? "Nodes that have failed to load will show as red on the graph." : ""}` + ); + this.logging.addEntry("Comfy.App", "warn", { + MissingNodes: missingNodeTypes, + }); + } + /** * Populates the graph with the specified workflow data * @param {*} graphData A serialized graph object @@ -1587,14 +1598,7 @@ export class ComfyApp { } if (missingNodeTypes.length) { - this.ui.dialog.show( - `When loading the graph, the following node types were not found:
    <ul>${Array.from(new Set(missingNodeTypes)).map( - (t) => `<li>${t}</li>` - ).join("")}</ul>
Nodes that have failed to load will show as red on the graph.` - ); - this.logging.addEntry("Comfy.App", "warn", { - MissingNodes: missingNodeTypes, - }); + this.showMissingNodesError(missingNodeTypes); } } @@ -1825,9 +1829,11 @@ export class ComfyApp { } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); reader.onload = () => { - var jsonContent = JSON.parse(reader.result); + const jsonContent = JSON.parse(reader.result); if (jsonContent?.templates) { this.loadTemplateData(jsonContent); + } else if(this.isApiJson(jsonContent)) { + this.loadApiJson(jsonContent); } else { this.loadGraphData(jsonContent); } @@ -1841,6 +1847,51 @@ export class ComfyApp { } } + isApiJson(data) { + return Object.values(data).every((v) => v.class_type); + } + + loadApiJson(apiData) { + const missingNodeTypes = Object.values(apiData).filter((n) => !LiteGraph.registered_node_types[n.class_type]); + if (missingNodeTypes.length) { + this.showMissingNodesError(missingNodeTypes.map(t => t.class_type), false); + return; + } + + const ids = Object.keys(apiData); + app.graph.clear(); + for (const id of ids) { + const data = apiData[id]; + const node = LiteGraph.createNode(data.class_type); + node.id = id; + graph.add(node); + } + + for (const id of ids) { + const data = apiData[id]; + const node = app.graph.getNodeById(id); + for (const input in data.inputs ?? {}) { + const value = data.inputs[input]; + if (value instanceof Array) { + const [fromId, fromSlot] = value; + const fromNode = app.graph.getNodeById(fromId); + const toSlot = node.inputs?.findIndex((inp) => inp.name === input); + if (toSlot !== -1) { + fromNode.connect(fromSlot, node, toSlot); + } + } else { + const widget = node.widgets?.find((w) => w.name === input); + if (widget) { + widget.value = value; + widget.callback?.(value); + } + } + } + } + + app.graph.arrange(); + } + /** * Registers a Comfy web extension with the app * @param {ComfyExtension} extension From ca71e542d2123cc58ef9c5884f67a6a211e4c41a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 9 Nov 2023 17:35:17 -0500 Subject: [PATCH 072/170] Lower cfg step to 0.1 in sampler nodes. 
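Only the widget metadata changes; `step` is the increment used when nudging the value in the UI and `round` the rounding precision, so cfg can now be adjusted in 0.1 increments instead of 0.5:

    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01})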
--- comfy_extras/nodes_custom_sampler.py | 2 +- nodes.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index b52ad8fbd..154ecd0d2 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -188,7 +188,7 @@ class SamplerCustom: {"model": ("MODEL",), "add_noise": ("BOOLEAN", {"default": True}), "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "positive": ("CONDITIONING", ), "negative": ("CONDITIONING", ), "sampler": ("SAMPLER", ), diff --git a/nodes.py b/nodes.py index 5ed015442..2bbfd8fe8 100644 --- a/nodes.py +++ b/nodes.py @@ -1218,7 +1218,7 @@ class KSampler: {"model": ("MODEL",), "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), "positive": ("CONDITIONING", ), @@ -1244,7 +1244,7 @@ class KSamplerAdvanced: "add_noise": (["enable", "disable"], ), "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), "positive": ("CONDITIONING", ), From 002aefa382585d171aef13c7bd21f64b8664fe28 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 9 Nov 2023 17:57:51 -0500 Subject: [PATCH 073/170] Support lcm models. Use the "lcm" sampler to sample them, you also have to use the ModelSamplingDiscrete node to set them as lcm models to use them properly. --- comfy/k_diffusion/sampling.py | 15 +++++- comfy/samplers.py | 2 +- comfy_extras/nodes_model_advanced.py | 75 +++++++++++++++++++++++++++- 3 files changed, 88 insertions(+), 4 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 937c5a388..dd6f7bbe5 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -717,7 +717,6 @@ def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler): mu += ((1 - alpha) * (1. - alpha_cumprod_prev) / (1. 
- alpha_cumprod)).sqrt() * noise_sampler(sigma, sigma_prev) return mu - def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None): extra_args = {} if extra_args is None else extra_args noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler @@ -737,3 +736,17 @@ def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disab def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None): return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step) +@torch.no_grad() +def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None): + extra_args = {} if extra_args is None else extra_args + noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler + s_in = x.new_ones([x.shape[0]]) + for i in trange(len(sigmas) - 1, disable=disable): + denoised = model(x, sigmas[i] * s_in, **extra_args) + if callback is not None: + callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) + + x = denoised + if sigmas[i + 1] > 0: + x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1]) + return x diff --git a/comfy/samplers.py b/comfy/samplers.py index 964febb26..d7ff89850 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -519,7 +519,7 @@ class UNIPCBH2(Sampler): KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", - "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm"] + "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"] def ksampler(sampler_name, extra_options={}, inpaint_options={}): class KSAMPLER(Sampler): diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index c02cfb05a..42596fbd5 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -1,6 +1,72 @@ import folder_paths import comfy.sd import comfy.model_sampling +import torch + +class LCM(comfy.model_sampling.EPS): + def calculate_denoised(self, sigma, model_output, model_input): + timestep = self.timestep(sigma).view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + x0 = model_input - model_output * sigma + + sigma_data = 0.5 + scaled_timestep = timestep * 10.0 #timestep_scaling + + c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2) + c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5 + + return c_out * x0 + c_skip * model_input + +class ModelSamplingDiscreteLCM(torch.nn.Module): + def __init__(self): + super().__init__() + self.sigma_data = 1.0 + timesteps = 1000 + beta_start = 0.00085 + beta_end = 0.012 + + betas = torch.linspace(beta_start**0.5, beta_end**0.5, timesteps, dtype=torch.float32) ** 2 + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + + original_timesteps = 50 + self.skip_steps = timesteps // original_timesteps + + + alphas_cumprod_valid = torch.zeros((original_timesteps), dtype=torch.float32) + for x in range(original_timesteps): + alphas_cumprod_valid[original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps] + + sigmas = ((1 - alphas_cumprod_valid) / alphas_cumprod_valid) ** 0.5 + self.set_sigmas(sigmas) + + 
def set_sigmas(self, sigmas): + self.register_buffer('sigmas', sigmas) + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + log_sigma = sigma.log() + dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1) + + def sigma(self, timestep): + t = torch.clamp(((timestep - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1)) + low_idx = t.floor().long() + high_idx = t.ceil().long() + w = t.frac() + log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] + return log_sigma.exp() + + def percent_to_sigma(self, percent): + return self.sigma(torch.tensor(percent * 999.0)) def rescale_zero_terminal_snr_sigmas(sigmas): @@ -26,7 +92,7 @@ class ModelSamplingDiscrete: @classmethod def INPUT_TYPES(s): return {"required": { "model": ("MODEL",), - "sampling": (["eps", "v_prediction"],), + "sampling": (["eps", "v_prediction", "lcm"],), "zsnr": ("BOOLEAN", {"default": False}), }} @@ -38,17 +104,22 @@ class ModelSamplingDiscrete: def patch(self, model, sampling, zsnr): m = model.clone() + sampling_base = comfy.model_sampling.ModelSamplingDiscrete if sampling == "eps": sampling_type = comfy.model_sampling.EPS elif sampling == "v_prediction": sampling_type = comfy.model_sampling.V_PREDICTION + elif sampling == "lcm": + sampling_type = LCM + sampling_base = ModelSamplingDiscreteLCM - class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingDiscrete, sampling_type): + class ModelSamplingAdvanced(sampling_base, sampling_type): pass model_sampling = ModelSamplingAdvanced() if zsnr: model_sampling.set_sigmas(rescale_zero_terminal_snr_sigmas(model_sampling.sigmas)) + m.add_object_patch("model_sampling", model_sampling) return (m, ) From 3e0033ef30a111076af54a7a4e6b470cdc570886 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 10 Nov 2023 03:19:05 -0500 Subject: [PATCH 074/170] Fix model merge bug. Unload models before getting weights for model patching. --- comfy/model_patcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 55800e86e..ef18d1b23 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -134,6 +134,7 @@ class ModelPatcher: return list(p) def get_key_patches(self, filter_prefix=None): + comfy.model_management.unload_model_clones(self) model_sd = self.model_state_dict() p = {} for k in model_sd: From 58d5d71a93908c6edd783d85557c2556b2e179c7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 10 Nov 2023 20:52:10 -0500 Subject: [PATCH 075/170] Working RescaleCFG node. This was broken because of recent changes so I fixed it and moved it from the experiments repo. 
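This is the CFG rescale from "Common Diffusion Noise Schedules and Sample Steps Are Flawed" (Lin et al., 2023): after the usual CFG combination, the result is rescaled so its per-sample standard deviation matches that of the positive prediction, then blended back in by `multiplier`. The core idea, stripped of the eps/v-space conversions (a sketch using the same names as the diff below):

    import torch

    def rescale_cfg_core(cond, uncond, cond_scale, multiplier):
        x_cfg = uncond + cond_scale * (cond - uncond)           # plain CFG
        ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)
        ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True)
        x_rescaled = x_cfg * (ro_pos / ro_cfg)                  # match cond's std
        return multiplier * x_rescaled + (1.0 - multiplier) * x_cfg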
--- comfy/samplers.py | 2 +- comfy_extras/nodes_model_advanced.py | 39 ++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index d7ff89850..a839ee9e2 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -248,7 +248,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: - args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x} + args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} return x - model_options["sampler_cfg_function"](args) else: return uncond + (cond - uncond) * cond_scale diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 42596fbd5..09d2d9072 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -123,6 +123,45 @@ class ModelSamplingDiscrete: m.add_object_patch("model_sampling", model_sampling) return (m, ) +class RescaleCFG: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "multiplier": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "advanced/model" + + def patch(self, model, multiplier): + def rescale_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + sigma = args["sigma"] + x_orig = args["input"] + + #rescale cfg has to be done on v-pred model output + x = x_orig / (sigma * sigma + 1.0) + cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma) + uncond = ((x - (x_orig - uncond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma) + + #rescalecfg + x_cfg = uncond + cond_scale * (cond - uncond) + ro_pos = torch.std(cond, dim=(1,2,3), keepdim=True) + ro_cfg = torch.std(x_cfg, dim=(1,2,3), keepdim=True) + + x_rescaled = x_cfg * (ro_pos / ro_cfg) + x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg + + return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5) + + m = model.clone() + m.set_model_sampler_cfg_function(rescale_cfg) + return (m, ) + NODE_CLASS_MAPPINGS = { "ModelSamplingDiscrete": ModelSamplingDiscrete, + "RescaleCFG": RescaleCFG, } From ca2812bae09f337378dc1d70714bf7287e27883a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 10 Nov 2023 22:05:25 -0500 Subject: [PATCH 076/170] Fix RescaleCFG for batch size > 1. --- comfy_extras/nodes_model_advanced.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 09d2d9072..399123eaa 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -140,6 +140,7 @@ class RescaleCFG: uncond = args["uncond"] cond_scale = args["cond_scale"] sigma = args["sigma"] + sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1)) x_orig = args["input"] #rescale cfg has to be done on v-pred model output From 412d3ff57d01d7e8c0889f686e31836170c4bfe3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 11 Nov 2023 01:00:43 -0500 Subject: [PATCH 077/170] Refactor. 
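The hand-rolled Linear is replaced by thin subclasses whose only job is to make reset_parameters a no-op, and conv_nd gains a 3D case. Skipping reset_parameters avoids running PyTorch's random weight initialization at construction time, which is wasted work when the weights are immediately overwritten from a checkpoint. Illustrative use of the new 3D path:

    from comfy.ops import conv_nd

    conv = conv_nd(3, 16, 32, kernel_size=3, padding=1)  # Conv3d, init skipped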
--- comfy/ops.py | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 610d54584..0bfb698aa 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -1,29 +1,23 @@ import torch from contextlib import contextmanager -class Linear(torch.nn.Module): - def __init__(self, in_features: int, out_features: int, bias: bool = True, - device=None, dtype=None) -> None: - factory_kwargs = {'device': device, 'dtype': dtype} - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.weight = torch.nn.Parameter(torch.empty((out_features, in_features), **factory_kwargs)) - if bias: - self.bias = torch.nn.Parameter(torch.empty(out_features, **factory_kwargs)) - else: - self.register_parameter('bias', None) - - def forward(self, input): - return torch.nn.functional.linear(input, self.weight, self.bias) +class Linear(torch.nn.Linear): + def reset_parameters(self): + return None class Conv2d(torch.nn.Conv2d): def reset_parameters(self): return None +class Conv3d(torch.nn.Conv3d): + def reset_parameters(self): + return None + def conv_nd(dims, *args, **kwargs): if dims == 2: return Conv2d(*args, **kwargs) + elif dims == 3: + return Conv3d(*args, **kwargs) else: raise ValueError(f"unsupported dimensions: {dims}") From 4a8a839b40fcae9960a6107200b89dce6675895d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 11 Nov 2023 01:03:39 -0500 Subject: [PATCH 078/170] Add option to use in place weight updating in ModelPatcher. --- comfy/model_patcher.py | 21 ++++++++++++++++----- comfy/utils.py | 8 ++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index ef18d1b23..6d7a61c41 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -6,7 +6,7 @@ import comfy.utils import comfy.model_management class ModelPatcher: - def __init__(self, model, load_device, offload_device, size=0, current_device=None): + def __init__(self, model, load_device, offload_device, size=0, current_device=None, weight_inplace_update=False): self.size = size self.model = model self.patches = {} @@ -22,6 +22,8 @@ class ModelPatcher: else: self.current_device = current_device + self.weight_inplace_update = weight_inplace_update + def model_size(self): if self.size > 0: return self.size @@ -171,15 +173,20 @@ class ModelPatcher: weight = model_sd[key] + inplace_update = self.weight_inplace_update + if key not in self.backup: - self.backup[key] = weight.to(self.offload_device) + self.backup[key] = weight.to(device=device_to, copy=inplace_update) if device_to is not None: temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) else: temp_weight = weight.to(torch.float32, copy=True) out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype) - comfy.utils.set_attr(self.model, key, out_weight) + if inplace_update: + comfy.utils.copy_to_param(self.model, key, out_weight) + else: + comfy.utils.set_attr(self.model, key, out_weight) del temp_weight if device_to is not None: @@ -295,8 +302,12 @@ class ModelPatcher: def unpatch_model(self, device_to=None): keys = list(self.backup.keys()) - for k in keys: - comfy.utils.set_attr(self.model, k, self.backup[k]) + if self.weight_inplace_update: + for k in keys: + comfy.utils.copy_to_param(self.model, k, self.backup[k]) + else: + for k in keys: + comfy.utils.set_attr(self.model, k, self.backup[k]) self.backup = {} diff --git a/comfy/utils.py b/comfy/utils.py index 
6a0c54e80..4b484d07a 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -261,6 +261,14 @@ def set_attr(obj, attr, value): setattr(obj, attrs[-1], torch.nn.Parameter(value)) del prev +def copy_to_param(obj, attr, value): + # inplace update tensor instead of replacing it + attrs = attr.split(".") + for name in attrs[:-1]: + obj = getattr(obj, name) + prev = getattr(obj, attrs[-1]) + prev.data.copy_(value) + def get_attr(obj, attr): attrs = attr.split(".") for name in attrs: From 248aa3e56355d75ac3d8632af769e6c700d9bfac Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 11 Nov 2023 12:20:16 -0500 Subject: [PATCH 079/170] Fix bug. --- comfy/model_patcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 6d7a61c41..9dc09791a 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -176,7 +176,7 @@ class ModelPatcher: inplace_update = self.weight_inplace_update if key not in self.backup: - self.backup[key] = weight.to(device=device_to, copy=inplace_update) + self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update) if device_to is not None: temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) From 006b24cc328977005a866a04f418a99dd76f2c4d Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Sat, 11 Nov 2023 15:56:14 -0300 Subject: [PATCH 080/170] Prevent image cache --- web/extensions/core/maskeditor.js | 4 ++-- web/scripts/app.js | 6 +++++- web/scripts/widgets.js | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/web/extensions/core/maskeditor.js b/web/extensions/core/maskeditor.js index f6292b9e3..8ace79562 100644 --- a/web/extensions/core/maskeditor.js +++ b/web/extensions/core/maskeditor.js @@ -42,7 +42,7 @@ async function uploadMask(filepath, formData) { }); ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = new Image(); - ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = api.apiURL("/view?" + new URLSearchParams(filepath).toString() + app.getPreviewFormatParam()); + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = api.apiURL("/view?" + new URLSearchParams(filepath).toString() + app.getPreviewFormatParam() + app.getRandParam()); if(ComfyApp.clipspace.images) ComfyApp.clipspace.images[ComfyApp.clipspace['selectedIndex']] = filepath; @@ -657,4 +657,4 @@ app.registerExtension({ const context_predicate = () => ComfyApp.clipspace && ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs.length > 0 ClipspaceDialog.registerButton("MaskEditor", context_predicate, ComfyApp.open_maskeditor); } -}); \ No newline at end of file +}); diff --git a/web/scripts/app.js b/web/scripts/app.js index 61b88d44b..2fa6c7f88 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -83,6 +83,10 @@ export class ComfyApp { return ""; } + getRandParam() { + return "&rand=" + Math.random(); + } + static isImageNode(node) { return node.imgs || (node && node.widgets && node.widgets.findIndex(obj => obj.name === 'image') >= 0); } @@ -427,7 +431,7 @@ export class ComfyApp { this.images = output.images; imagesChanged = true; imgURLs = imgURLs.concat(output.images.map(params => { - return api.apiURL("/view?" + new URLSearchParams(params).toString() + app.getPreviewFormatParam()); + return api.apiURL("/view?" 
+ new URLSearchParams(params).toString() + app.getPreviewFormatParam() + app.getRandParam()); })) } } diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 2b6747769..88c0b07ee 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -356,7 +356,7 @@ export const ComfyWidgets = { subfolder = name.substring(0, folder_separator); name = name.substring(folder_separator + 1); } - img.src = api.apiURL(`/view?filename=${encodeURIComponent(name)}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`); + img.src = api.apiURL(`/view?filename=${encodeURIComponent(name)}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}${app.getRandParam()}`); node.setSizeForImage?.(); } From 8d80584f6a2797268b9b57ec84d6c76e8a27891c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 01:25:33 -0500 Subject: [PATCH 081/170] Remove useless argument from uni_pc sampler. --- comfy/extra_samplers/uni_pc.py | 2 +- comfy/samplers.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 1a7a83929..08bf0fc9e 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -858,7 +858,7 @@ def predict_eps_sigma(model, input, sigma_in, **kwargs): return (input - model(input, sigma_in, **kwargs)) / sigma -def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): +def sample_unipc(model, noise, image, sigmas, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): timesteps = sigmas.clone() if sigmas[-1] == 0: timesteps = sigmas[:] diff --git a/comfy/samplers.py b/comfy/samplers.py index a839ee9e2..b8836a29d 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -511,11 +511,11 @@ class Sampler: class UNIPC(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) + return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) class UNIPCBH2(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) + return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", From 2c9dba8dc08eb35d29dab691c1f2808f6c9191ea Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 03:45:10 -0500 Subject: [PATCH 082/170] sampling_function now has the model object as the argument. 
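Passing the model object instead of its bound apply_model method lets helpers inside sampling_function ask the model for more than a forward pass; the next commit uses this to query per-model memory estimates. Inside the batching helper the call site simply becomes:

    output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)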
--- comfy/samplers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index b8836a29d..a2c784a4a 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -11,7 +11,7 @@ import comfy.conds #The main sampling function shared by all the samplers #Returns denoised -def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): +def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) strength = 1.0 @@ -134,7 +134,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod return out - def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): + def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) out_count = torch.ones_like(x_in) * 1e-37 @@ -221,9 +221,9 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod c['transformer_options'] = transformer_options if 'model_function_wrapper' in model_options: - output = model_options['model_function_wrapper'](model_function, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks) + output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks) else: - output = model_function(input_x, timestep_, **c).chunk(batch_chunks) + output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) del input_x for o in range(batch_chunks): @@ -246,7 +246,7 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, mod if math.isclose(cond_scale, 1.0): uncond = None - cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) + cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} return x - model_options["sampler_cfg_function"](args) @@ -258,7 +258,7 @@ class CFGNoisePredictor(torch.nn.Module): super().__init__() self.inner_model = model def apply_model(self, x, timestep, cond, uncond, cond_scale, model_options={}, seed=None): - out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) + out = sampling_function(self.inner_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) return out def forward(self, *args, **kwargs): return self.apply_model(*args, **kwargs) From dd4ba68b6e93a562d9499eff34e50dbbbc8714e7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 04:02:16 -0500 Subject: [PATCH 083/170] Allow different models to estimate memory usage differently. 
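memory_required moves onto BaseModel so model classes can override the heuristic, and the batching code now sizes batches against the actual free memory instead of a global maximum_batch_area. For a feel of the numbers (shapes illustrative, not from the patch), a single 512x512 image is a [1, 4, 64, 64] latent:

    def memory_required(input_shape):  # xformers / flash-attention path
        area = input_shape[0] * input_shape[2] * input_shape[3]
        return (area / 20) * (1024 * 1024)

    memory_required([1, 4, 64, 64])  # area = 4096 -> ~205 MiB reserved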
--- comfy/model_base.py | 10 ++++++++++ comfy/model_management.py | 21 --------------------- comfy/model_patcher.py | 3 +++ comfy/sample.py | 2 +- comfy/samplers.py | 9 +++++---- 5 files changed, 19 insertions(+), 26 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 7ba253470..f6de0b258 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -157,6 +157,16 @@ class BaseModel(torch.nn.Module): def set_inpaint(self): self.inpaint_model = True + def memory_required(self, input_shape): + area = input_shape[0] * input_shape[2] * input_shape[3] + if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): + #TODO: this needs to be tweaked + return (area / 20) * (1024 * 1024) + else: + #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. + return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) + + def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0): adm_inputs = [] weights = [] diff --git a/comfy/model_management.py b/comfy/model_management.py index 53582fc73..799e52ba2 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -579,27 +579,6 @@ def get_free_memory(dev=None, torch_free_too=False): else: return mem_free_total -def batch_area_memory(area): - if xformers_enabled() or pytorch_attention_flash_attention(): - #TODO: these formulas are copied from maximum_batch_area below - return (area / 20) * (1024 * 1024) - else: - return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) - -def maximum_batch_area(): - global vram_state - if vram_state == VRAMState.NO_VRAM: - return 0 - - memory_free = get_free_memory() / (1024 * 1024) - if xformers_enabled() or pytorch_attention_flash_attention(): - #TODO: this needs to be tweaked - area = 20 * memory_free - else: - #TODO: this formula is because AMD sucks and has memory management issues which might be fixed in the future - area = ((memory_free - 1024) * 0.9) / (0.6) - return int(max(area, 0)) - def cpu_mode(): global cpu_state return cpu_state == CPUState.CPU diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 9dc09791a..1c36855de 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -52,6 +52,9 @@ class ModelPatcher: return True return False + def memory_required(self, input_shape): + return self.model.memory_required(input_shape=input_shape) + def set_model_sampler_cfg_function(self, sampler_cfg_function): if len(inspect.signature(sampler_cfg_function).parameters) == 3: self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way diff --git a/comfy/sample.py b/comfy/sample.py index b3fcd1658..4bfdb8ce5 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -83,7 +83,7 @@ def prepare_sampling(model, noise_shape, positive, negative, noise_mask): real_model = None models, inference_memory = get_additional_models(positive, negative, model.model_dtype()) - comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise_shape[0] * noise_shape[2] * noise_shape[3]) + inference_memory) + comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise_shape) + inference_memory) real_model = model.model return real_model, positive, negative, noise_mask, models diff --git a/comfy/samplers.py b/comfy/samplers.py index a2c784a4a..5340dd019 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -134,7 +134,7 @@ def 
sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option return out - def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, max_total_area, model_options): + def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): out_cond = torch.zeros_like(x_in) out_count = torch.ones_like(x_in) * 1e-37 @@ -170,9 +170,11 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option to_batch_temp.reverse() to_batch = to_batch_temp[:1] + free_memory = model_management.get_free_memory(x_in.device) for i in range(1, len(to_batch_temp) + 1): batch_amount = to_batch_temp[:len(to_batch_temp)//i] - if (len(batch_amount) * first_shape[0] * first_shape[2] * first_shape[3] < max_total_area): + input_shape = [len(batch_amount) * first_shape[0]] + list(first_shape)[1:] + if model.memory_required(input_shape) < free_memory: to_batch = batch_amount break @@ -242,11 +244,10 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option return out_cond, out_uncond - max_total_area = model_management.maximum_batch_area() if math.isclose(cond_scale, 1.0): uncond = None - cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, max_total_area, model_options) + cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options) if "sampler_cfg_function" in model_options: args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} return x - model_options["sampler_cfg_function"](args) From 4781819a85847a8cf180a41d0ee4cdf99979e5be Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 04:26:16 -0500 Subject: [PATCH 084/170] Make memory estimation aware of model dtype. --- comfy/model_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index f6de0b258..37bf24bb8 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -161,7 +161,7 @@ class BaseModel(torch.nn.Module): area = input_shape[0] * input_shape[2] * input_shape[3] if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): #TODO: this needs to be tweaked - return (area / 20) * (1024 * 1024) + return (area / (comfy.model_management.dtype_size(self.get_dtype()) * 10)) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) From 4aeef781a3caecc694e3336ca9339e8e171ba4d4 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 12 Nov 2023 19:49:23 +0000 Subject: [PATCH 085/170] Support number/text ids when importing API JSON (#1952) * support numeric/text ids --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 61b88d44b..d22b98c31 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1863,7 +1863,7 @@ export class ComfyApp { for (const id of ids) { const data = apiData[id]; const node = LiteGraph.createNode(data.class_type); - node.id = id; + node.id = isNaN(+id) ? id : +id; graph.add(node); } From f12ec55983fb13b3bcad33b05ff8043b22b36181 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 00:42:34 -0500 Subject: [PATCH 086/170] Allow boolean widgets to have no options dict. 
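On the Python side this means a custom node can declare a BOOLEAN input without an options dict, or with only some of the keys (hypothetical input names, real spec format):

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "flag": ("BOOLEAN",),  # now valid, defaults to off
            "named": ("BOOLEAN", {"default": True, "label_on": "enabled", "label_off": "disabled"}),
        }}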
--- web/scripts/widgets.js | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 2b6747769..36bc7ff7f 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -305,14 +305,23 @@ export const ComfyWidgets = { }; }, BOOLEAN(node, inputName, inputData) { - let defaultVal = inputData[1]["default"]; + let defaultVal = false; + let options = {}; + if (inputData[1]) { + if (inputData[1].default) + defaultVal = inputData[1].default; + if (inputData[1].label_on) + options["on"] = inputData[1].label_on; + if (inputData[1].label_off) + options["off"] = inputData[1].label_off; + } return { widget: node.addWidget( "toggle", inputName, defaultVal, () => {}, - {"on": inputData[1].label_on, "off": inputData[1].label_off} + options, ) }; }, From 7339479b10a622729222ae7d9a5e06db340a1b99 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 12:27:44 -0500 Subject: [PATCH 087/170] Disable xformers when it can't load properly. --- comfy/model_management.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 799e52ba2..be4301aa4 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -133,6 +133,10 @@ else: import xformers import xformers.ops XFORMERS_IS_AVAILABLE = True + try: + XFORMERS_IS_AVAILABLE = xformers._has_cpp_library + except: + pass try: XFORMERS_VERSION = xformers.version.__version__ print("xformers version:", XFORMERS_VERSION) From eb0407e80657ab603a1251a653ad8b2e9e89c83c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 16:26:28 -0500 Subject: [PATCH 088/170] Update litegraph to latest. --- web/lib/litegraph.core.js | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index e906590f5..0ca203842 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -2533,7 +2533,7 @@ var w = this.widgets[i]; if(!w) continue; - if(w.options && w.options.property && this.properties[ w.options.property ]) + if(w.options && w.options.property && (this.properties[ w.options.property ] != undefined)) w.value = JSON.parse( JSON.stringify( this.properties[ w.options.property ] ) ); } if (info.widgets_values) { @@ -4928,9 +4928,7 @@ LGraphNode.prototype.executeAction = function(action) this.title = o.title; this._bounding.set(o.bounding); this.color = o.color; - if (o.font_size) { - this.font_size = o.font_size; - } + this.font_size = o.font_size; }; LGraphGroup.prototype.serialize = function() { @@ -5714,10 +5712,10 @@ LGraphNode.prototype.executeAction = function(action) * @method enableWebGL **/ LGraphCanvas.prototype.enableWebGL = function() { - if (typeof GL === undefined) { + if (typeof GL === "undefined") { throw "litegl.js must be included to use a WebGL canvas"; } - if (typeof enableWebGLCanvas === undefined) { + if (typeof enableWebGLCanvas === "undefined") { throw "webglCanvas.js must be included to use this feature"; } @@ -7110,15 +7108,16 @@ LGraphNode.prototype.executeAction = function(action) } }; - LGraphCanvas.prototype.copyToClipboard = function() { + LGraphCanvas.prototype.copyToClipboard = function(nodes) { var clipboard_info = { nodes: [], links: [] }; var index = 0; var selected_nodes_array = []; - for (var i in this.selected_nodes) { - var node = this.selected_nodes[i]; + if (!nodes) nodes = this.selected_nodes; + for (var i in nodes) { + var node = nodes[i]; if (node.clonable === false) 
continue; node._relative_id = index; @@ -11702,7 +11701,7 @@ LGraphNode.prototype.executeAction = function(action) default: iS = 0; // try with first if no name set } - if (typeof options.node_from.outputs[iS] !== undefined){ + if (typeof options.node_from.outputs[iS] !== "undefined"){ if (iS!==false && iS>-1){ options.node_from.connectByType( iS, node, options.node_from.outputs[iS].type ); } @@ -11730,7 +11729,7 @@ LGraphNode.prototype.executeAction = function(action) default: iS = 0; // try with first if no name set } - if (typeof options.node_to.inputs[iS] !== undefined){ + if (typeof options.node_to.inputs[iS] !== "undefined"){ if (iS!==false && iS>-1){ // try connection options.node_to.connectByTypeOutput(iS,node,options.node_to.inputs[iS].type); From 61112c81b99d0e43c2d6031aae036eed8a39fdbb Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 21:45:08 -0500 Subject: [PATCH 089/170] Add a node to flip the sigmas for unsampling. --- comfy_extras/nodes_custom_sampler.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 154ecd0d2..ff7407f41 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -118,6 +118,24 @@ class SplitSigmas: sigmas2 = sigmas[step:] return (sigmas1, sigmas2) +class FlipSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sigmas": ("SIGMAS", ), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "sampling/custom_sampling/sigmas" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, sigmas): + sigmas = sigmas.flip(0) + if sigmas[0] == 0: + sigmas[0] = 0.0001 + return (sigmas,) + class KSamplerSelect: @classmethod def INPUT_TYPES(s): @@ -243,4 +261,5 @@ NODE_CLASS_MAPPINGS = { "SamplerDPMPP_SDE": SamplerDPMPP_SDE, "BasicScheduler": BasicScheduler, "SplitSigmas": SplitSigmas, + "FlipSigmas": FlipSigmas, } From 8509bd58b436eb56e1e251c627416b457626252a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 21:45:23 -0500 Subject: [PATCH 090/170] Reorganize custom_sampling nodes. 
--- comfy_extras/nodes_custom_sampler.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index ff7407f41..f0576946a 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -16,7 +16,7 @@ class BasicScheduler: } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -36,7 +36,7 @@ class KarrasScheduler: } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -54,7 +54,7 @@ class ExponentialScheduler: } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -73,7 +73,7 @@ class PolyexponentialScheduler: } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -92,7 +92,7 @@ class VPScheduler: } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -109,7 +109,7 @@ class SplitSigmas: } } RETURN_TYPES = ("SIGMAS","SIGMAS") - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/sigmas" FUNCTION = "get_sigmas" @@ -144,7 +144,7 @@ class KSamplerSelect: } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/samplers" FUNCTION = "get_sampler" @@ -163,7 +163,7 @@ class SamplerDPMPP_2M_SDE: } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/samplers" FUNCTION = "get_sampler" @@ -187,7 +187,7 @@ class SamplerDPMPP_SDE: } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/samplers" FUNCTION = "get_sampler" @@ -252,6 +252,7 @@ class SamplerCustom: NODE_CLASS_MAPPINGS = { "SamplerCustom": SamplerCustom, + "BasicScheduler": BasicScheduler, "KarrasScheduler": KarrasScheduler, "ExponentialScheduler": ExponentialScheduler, "PolyexponentialScheduler": PolyexponentialScheduler, @@ -259,7 +260,6 @@ NODE_CLASS_MAPPINGS = { "KSamplerSelect": KSamplerSelect, "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, "SamplerDPMPP_SDE": SamplerDPMPP_SDE, - "BasicScheduler": BasicScheduler, "SplitSigmas": SplitSigmas, "FlipSigmas": FlipSigmas, } From 94cc718e9c42cb4de337293b66dd42fb594b9cae Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 00:08:12 -0500 Subject: [PATCH 091/170] Add a way to add patches to the input block. 
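This mirrors the existing output_block_patch hook: registered callbacks run after each input block (and its control residual) with the current hidden states, and transformer_options["block"] identifies which block is executing. A minimal sketch of what a custom node might register (the callback body is hypothetical, the API is the one added here):

    def input_block_patch(h, transformer_options):
        block_name, block_id = transformer_options["block"]  # ("input", id)
        return h  # inspect or modify the hidden states here

    m = model.clone()
    m.set_model_input_block_patch(input_block_patch)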
--- comfy/ldm/modules/diffusionmodules/openaimodel.py | 5 +++++ comfy/model_patcher.py | 3 +++ 2 files changed, 8 insertions(+) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 49c1e8cbb..cac0dfb65 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -624,6 +624,11 @@ class UNetModel(nn.Module): transformer_options["block"] = ("input", id) h = forward_timestep_embed(module, h, emb, context, transformer_options) h = apply_control(h, control, 'input') + if "input_block_patch" in transformer_patches: + patch = transformer_patches["input_block_patch"] + for p in patch: + h = p(h, transformer_options) + hs.append(h) transformer_options["block"] = ("middle", 0) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 1c36855de..023684331 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -96,6 +96,9 @@ class ModelPatcher: def set_model_attn2_output_patch(self, patch): self.set_model_patch(patch, "attn2_output_patch") + def set_model_input_block_patch(self, patch): + self.set_model_patch(patch, "input_block_patch") + def set_model_output_block_patch(self, patch): self.set_model_patch(patch, "output_block_patch") From f2e49b1d575b3da4367ba4d60b95187f270d42c9 Mon Sep 17 00:00:00 2001 From: Jianqi Pan Date: Tue, 14 Nov 2023 14:32:05 +0900 Subject: [PATCH 092/170] fix: adaptation to older versions of pytroch --- comfy/sd1_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 7db7ee0f4..af621b2dc 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -175,7 +175,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): else: precision_scope = lambda a, b: contextlib.nullcontext(a) - with precision_scope(model_management.get_autocast_device(device), torch.float32): + with precision_scope(model_management.get_autocast_device(device), dtype=torch.float32): attention_mask = None if self.enable_attention_masks: attention_mask = torch.zeros_like(tokens) From 420beeeb05ef59e887f8731f615f8a9ec6eb0a4c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 00:39:34 -0500 Subject: [PATCH 093/170] Clean up and refactor sampler code. This should make it much easier to write custom nodes with kdiffusion type samplers. --- comfy/samplers.py | 89 +++++++++++++++++----------- comfy_extras/nodes_custom_sampler.py | 6 +- 2 files changed, 56 insertions(+), 39 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 5340dd019..65c44791d 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -522,42 +522,59 @@ KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral" "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"] +class KSAMPLER(Sampler): + def __init__(self, sampler_function, extra_options={}, inpaint_options={}): + self.sampler_function = sampler_function + self.extra_options = extra_options + self.inpaint_options = inpaint_options + + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + extra_args["denoise_mask"] = denoise_mask + model_k = KSamplerX0Inpaint(model_wrap) + model_k.latent_image = latent_image + if self.inpaint_options.get("random", False): #TODO: Should this be the default? 
+ generator = torch.manual_seed(extra_args.get("seed", 41) + 1) + model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device) + else: + model_k.noise = noise + + if self.max_denoise(model_wrap, sigmas): + noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) + else: + noise = noise * sigmas[0] + + k_callback = None + total_steps = len(sigmas) - 1 + if callback is not None: + k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps) + + if latent_image is not None: + noise += latent_image + + samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options) + return samples + + def ksampler(sampler_name, extra_options={}, inpaint_options={}): - class KSAMPLER(Sampler): - def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - extra_args["denoise_mask"] = denoise_mask - model_k = KSamplerX0Inpaint(model_wrap) - model_k.latent_image = latent_image - if inpaint_options.get("random", False): #TODO: Should this be the default? - generator = torch.manual_seed(extra_args.get("seed", 41) + 1) - model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device) - else: - model_k.noise = noise - - if self.max_denoise(model_wrap, sigmas): - noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) - else: - noise = noise * sigmas[0] - - k_callback = None - total_steps = len(sigmas) - 1 - if callback is not None: - k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps) - + if sampler_name == "dpm_fast": + def dpm_fast_function(model, noise, sigmas, extra_args, callback, disable): sigma_min = sigmas[-1] if sigma_min == 0: sigma_min = sigmas[-2] + total_steps = len(sigmas) - 1 + return k_diffusion_sampling.sample_dpm_fast(model, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=callback, disable=disable) + sampler_function = dpm_fast_function + elif sampler_name == "dpm_adaptive": + def dpm_adaptive_function(model, noise, sigmas, extra_args, callback, disable): + sigma_min = sigmas[-1] + if sigma_min == 0: + sigma_min = sigmas[-2] + return k_diffusion_sampling.sample_dpm_adaptive(model, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=callback, disable=disable) + sampler_function = dpm_adaptive_function + else: + sampler_function = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name)) - if latent_image is not None: - noise += latent_image - if sampler_name == "dpm_fast": - samples = k_diffusion_sampling.sample_dpm_fast(model_k, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar) - elif sampler_name == "dpm_adaptive": - samples = k_diffusion_sampling.sample_dpm_adaptive(model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar) - else: - samples = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **extra_options) - return samples - return KSAMPLER + return KSAMPLER(sampler_function, extra_options, inpaint_options) def wrap_model(model): model_denoise = CFGNoisePredictor(model) @@ -618,11 +635,11 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps): print("error invalid scheduler", self.scheduler) return sigmas -def sampler_class(name): +def sampler_object(name): if name == "uni_pc": - 
sampler = UNIPC + sampler = UNIPC() elif name == "uni_pc_bh2": - sampler = UNIPCBH2 + sampler = UNIPCBH2() elif name == "ddim": sampler = ksampler("euler", inpaint_options={"random": True}) else: @@ -687,6 +704,6 @@ class KSampler: else: return torch.zeros_like(noise) - sampler = sampler_class(self.sampler) + sampler = sampler_object(self.sampler) - return sample(self.model, noise, positive, negative, cfg, self.device, sampler(), sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) + return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index f0576946a..d3c1d4a23 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -149,7 +149,7 @@ class KSamplerSelect: FUNCTION = "get_sampler" def get_sampler(self, sampler_name): - sampler = comfy.samplers.sampler_class(sampler_name)() + sampler = comfy.samplers.sampler_object(sampler_name) return (sampler, ) class SamplerDPMPP_2M_SDE: @@ -172,7 +172,7 @@ class SamplerDPMPP_2M_SDE: sampler_name = "dpmpp_2m_sde" else: sampler_name = "dpmpp_2m_sde_gpu" - sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type})() + sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type}) return (sampler, ) @@ -196,7 +196,7 @@ class SamplerDPMPP_SDE: sampler_name = "dpmpp_sde" else: sampler_name = "dpmpp_sde_gpu" - sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r})() + sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r}) return (sampler, ) class SamplerCustom: From c962884a5c987e95d6928565ddb44220b769808e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 11:38:36 -0500 Subject: [PATCH 094/170] Make bislerp work on GPU. 
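The coordinate helpers used to build their index tensors on the CPU, so bislerp only worked with CPU latents; creating them on samples.device lets latents stay on the GPU through the "bislerp" upscale mode. Illustrative use (shapes hypothetical):

    import torch
    import comfy.utils

    samples = torch.randn(1, 4, 64, 64, device="cuda")
    upscaled = comfy.utils.bislerp(samples, 128, 128)  # [1, 4, 128, 128], still on cuda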
--- comfy/utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/comfy/utils.py b/comfy/utils.py index 4b484d07a..1985012e0 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -307,13 +307,13 @@ def bislerp(samples, width, height): res[dot < 1e-5 - 1] = (b1 * (1.0-r) + b2 * r)[dot < 1e-5 - 1] return res - def generate_bilinear_data(length_old, length_new): - coords_1 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32) + def generate_bilinear_data(length_old, length_new, device): + coords_1 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1)) coords_1 = torch.nn.functional.interpolate(coords_1, size=(1, length_new), mode="bilinear") ratios = coords_1 - coords_1.floor() coords_1 = coords_1.to(torch.int64) - coords_2 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32) + 1 + coords_2 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1)) + 1 coords_2[:,:,:,-1] -= 1 coords_2 = torch.nn.functional.interpolate(coords_2, size=(1, length_new), mode="bilinear") coords_2 = coords_2.to(torch.int64) @@ -323,7 +323,7 @@ def bislerp(samples, width, height): h_new, w_new = (height, width) #linear w - ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new) + ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new, samples.device) coords_1 = coords_1.expand((n, c, h, -1)) coords_2 = coords_2.expand((n, c, h, -1)) ratios = ratios.expand((n, 1, h, -1)) @@ -336,7 +336,7 @@ def bislerp(samples, width, height): result = result.reshape(n, h, w_new, c).movedim(-1, 1) #linear h - ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new) + ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new, samples.device) coords_1 = coords_1.reshape((1,1,-1,1)).expand((n, c, -1, w_new)) coords_2 = coords_2.reshape((1,1,-1,1)).expand((n, c, -1, w_new)) ratios = ratios.reshape((1,1,-1,1)).expand((n, 1, -1, w_new)) From 728613bb3e9a42a3e05abf19b1b893eb6ef35081 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 14:41:31 -0500 Subject: [PATCH 095/170] Fix last pr. --- comfy/sd1_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index af621b2dc..58acb97fc 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -173,7 +173,7 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): if getattr(self.transformer, self.inner_name).final_layer_norm.weight.dtype != torch.float32: precision_scope = torch.autocast else: - precision_scope = lambda a, b: contextlib.nullcontext(a) + precision_scope = lambda a, dtype: contextlib.nullcontext(a) with precision_scope(model_management.get_autocast_device(device), dtype=torch.float32): attention_mask = None From 7b87c825a3e95b362b101a608bbae2bbf13e1850 Mon Sep 17 00:00:00 2001 From: 42lux Date: Wed, 15 Nov 2023 02:37:35 +0100 Subject: [PATCH 096/170] Added Colorschemes. Arc, North and Github. 
--- web/extensions/core/colorPalette.js | 207 ++++++++++++++++++++++++++++ 1 file changed, 207 insertions(+) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index 3695b08e2..b8d83613d 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -174,6 +174,213 @@ const colorPalettes = { "tr-odd-bg-color": "#073642", } }, + }, + "arc": { + "id": "arc", + "name": "Arc", + "colors": { + "node_slot": { + "BOOLEAN": "", + "CLIP": "#eacb8b", + "CLIP_VISION": "#A8DADC", + "CLIP_VISION_OUTPUT": "#ad7452", + "CONDITIONING": "#cf876f", + "CONTROL_NET": "#00d78d", + "CONTROL_NET_WEIGHTS": "", + "FLOAT": "", + "GLIGEN": "", + "IMAGE": "#80a1c0", + "IMAGEUPLOAD": "", + "INT": "", + "LATENT": "#b38ead", + "LATENT_KEYFRAME": "", + "MASK": "#a3bd8d", + "MODEL": "#8978a7", + "SAMPLER": "", + "SIGMAS": "", + "STRING": "", + "STYLE_MODEL": "#C2FFAE", + "T2I_ADAPTER_WEIGHTS": "", + "TAESD": "#DCC274", + "TIMESTEP_KEYFRAME": "", + "UPSCALE_MODEL": "", + "VAE": "#be616b" + }, + "litegraph_base": { + "BACKGROUND_IMAGE": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAACXBIWXMAAAsTAAALEwEAmpwYAAABcklEQVR4nO3YMUoDARgF4RfxBqZI6/0vZqFn0MYtrLIQMFN8U6V4LAtD+Jm9XG/v30OGl2e/AP7yevz4+vx45nvgF/+QGITEICQGITEIiUFIjNNC3q43u3/YnRJyPOzeQ+0e220nhRzReC8e7R7bbdvl+Jal1Bs46jEIiUFIDEJiEBKDkBhKPbZT6qHdptRTu02p53DUYxASg5AYhMQgJAYhMZR6bKfUQ7tNqad2m1LP4ajHICQGITEIiUFIDEJiKPXYTqmHdptST+02pZ7DUY9BSAxCYhASg5AYhMRQ6rGdUg/tNqWe2m1KPYejHoOQGITEICQGITEIiaHUYzulHtptSj2125R6Dkc9BiExCIlBSAxCYhASQ6nHdko9tNuUemq3KfUcjnoMQmIQEoOQGITEICSGUo/tlHpotyn11G5T6jkc9RiExCAkBiExCIlBSAylHtsp9dBuU+qp3abUczjqMQiJQUgMQmIQEoOQGITE+AHFISNQrFTGuwAAAABJRU5ErkJggg==", + "CLEAR_BACKGROUND_COLOR": "#2b2f38", + "NODE_TITLE_COLOR": "#b2b7bd", + "NODE_SELECTED_TITLE_COLOR": "#FFF", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#AAA", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#2b2f38", + "NODE_DEFAULT_BGCOLOR": "#242730", + "NODE_DEFAULT_BOXCOLOR": "#6e7581", + "NODE_DEFAULT_SHAPE": "box", + "NODE_BOX_OUTLINE_COLOR": "#FFF", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 22, + "WIDGET_BGCOLOR": "#2b2f38", + "WIDGET_OUTLINE_COLOR": "#6e7581", + "WIDGET_TEXT_COLOR": "#DDD", + "WIDGET_SECONDARY_TEXT_COLOR": "#b2b7bd", + "LINK_COLOR": "#9A9", + "EVENT_LINK_COLOR": "#A86", + "CONNECTING_LINK_COLOR": "#AFA" + }, + "comfy_base": { + "fg-color": "#fff", + "bg-color": "#2b2f38", + "comfy-menu-bg": "#242730", + "comfy-input-bg": "#2b2f38", + "input-text": "#ddd", + "descrip-text": "#b2b7bd", + "drag-text": "#ccc", + "error-text": "#ff4444", + "border-color": "#6e7581", + "tr-even-bg-color": "#2b2f38", + "tr-odd-bg-color": "#242730" + } + }, + }, + "nord": { + "id": "nord", + "name": "Nord", + "colors": { + "node_slot": { + "BOOLEAN": "", + "CLIP": "#eacb8b", + "CLIP_VISION": "#A8DADC", + "CLIP_VISION_OUTPUT": "#ad7452", + "CONDITIONING": "#cf876f", + "CONTROL_NET": "#00d78d", + "CONTROL_NET_WEIGHTS": "", + "FLOAT": "", + "GLIGEN": "", + "IMAGE": "#80a1c0", + "IMAGEUPLOAD": "", + "INT": "", + "LATENT": "#b38ead", + "LATENT_KEYFRAME": "", + "MASK": "#a3bd8d", + "MODEL": "#8978a7", + "SAMPLER": "", + "SIGMAS": "", + "STRING": "", + "STYLE_MODEL": "#C2FFAE", + "T2I_ADAPTER_WEIGHTS": "", + "TAESD": "#DCC274", + "TIMESTEP_KEYFRAME": "", + "UPSCALE_MODEL": "", + "VAE": "#be616b" + }, + "litegraph_base": { + "BACKGROUND_IMAGE": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAFu2lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOjUwNDFhMmZjLTEzNzQtMTk0ZC1hZWY4LTYxMzM1MTVmNjUwMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiPiA8eG1wTU06SGlzdG9yeT4gPHJkZjpTZXE+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJjcmVhdGVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo1MDQxYTJmYy0xMzc0LTE5NGQtYWVmOC02MTMzNTE1ZjY1MDAiIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz73jWg/AAAAyUlEQVR42u3WKwoAIBRFQRdiMb1idv9Lsxn9gEFw4Dbb8JCTojbbXEJwjJVL2HKwYMGCBQuWLbDmjr+9zrBGjHl1WVcvy2DBggULFizTWQpewSt4HzwsgwULFiwFr7MUvMtS8D54WLBgGSxYCl7BK3iXZbBgwYIFC5bpLAWv4BW8Dx6WwYIFC5aC11kK3mUpeB88LFiwDBYsBa/gFbzLMliwYMGCBct0loJX8AreBw/LYMGCBUvB6ywF77IUvA8eFixYBgsWrNfWAZPltufdad+1AAAAAElFTkSuQmCC", + "CLEAR_BACKGROUND_COLOR": "#212732", + "NODE_TITLE_COLOR": "#999", + "NODE_SELECTED_TITLE_COLOR": "#e5eaf0", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#bcc2c8", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#2e3440", + "NODE_DEFAULT_BGCOLOR": "#161b22", + "NODE_DEFAULT_BOXCOLOR": "#545d70", + "NODE_DEFAULT_SHAPE": "box", + "NODE_BOX_OUTLINE_COLOR": "#e5eaf0", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 24, + "WIDGET_BGCOLOR": "#2e3440", + "WIDGET_OUTLINE_COLOR": "#545d70", + "WIDGET_TEXT_COLOR": "#bcc2c8", + "WIDGET_SECONDARY_TEXT_COLOR": "#999", + "LINK_COLOR": "#9A9", + "EVENT_LINK_COLOR": "#A86", + "CONNECTING_LINK_COLOR": "#AFA" + }, + "comfy_base": { + "fg-color": "#e5eaf0", + "bg-color": "#2e3440", + "comfy-menu-bg": "#161b22", + "comfy-input-bg": "#2e3440", + "input-text": "#bcc2c8", + "descrip-text": "#999", + "drag-text": "#ccc", + "error-text": "#ff4444", + "border-color": "#545d70", + "tr-even-bg-color": "#2e3440", + "tr-odd-bg-color": "#161b22" + } + }, + }, + "github": { + "id": "github", + "name": "Github", + "colors": { + "node_slot": { + "BOOLEAN": "", + "CLIP": "#eacb8b", + "CLIP_VISION": "#A8DADC", + 
"CLIP_VISION_OUTPUT": "#ad7452", + "CONDITIONING": "#cf876f", + "CONTROL_NET": "#00d78d", + "CONTROL_NET_WEIGHTS": "", + "FLOAT": "", + "GLIGEN": "", + "IMAGE": "#80a1c0", + "IMAGEUPLOAD": "", + "INT": "", + "LATENT": "#b38ead", + "LATENT_KEYFRAME": "", + "MASK": "#a3bd8d", + "MODEL": "#8978a7", + "SAMPLER": "", + "SIGMAS": "", + "STRING": "", + "STYLE_MODEL": "#C2FFAE", + "T2I_ADAPTER_WEIGHTS": "", + "TAESD": "#DCC274", + "TIMESTEP_KEYFRAME": "", + "UPSCALE_MODEL": "", + "VAE": "#be616b" + }, + "litegraph_base": { + "BACKGROUND_IMAGE": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAGlmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOmIyYzRhNjA5LWJmYTctYTg0MC1iOGFlLTk3MzE2ZjM1ZGIyNyIgeG1wTU06RG9jdW1lbnRJRD0iYWRvYmU6ZG9jaWQ6cGhvdG9zaG9wOjk0ZmNlZGU4LTE1MTctZmQ0MC04ZGU3LWYzOTgxM2E3ODk5ZiIgeG1wTU06T3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCI+IDx4bXBNTTpIaXN0b3J5PiA8cmRmOlNlcT4gPHJkZjpsaSBzdEV2dDphY3Rpb249ImNyZWF0ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6MjMxYjEwYjAtYjRmYi0wMjRlLWIxMmUtMzA1MzAzY2QwN2M4IiBzdEV2dDp3aGVuPSIyMDIzLTExLTEzVDAwOjE4OjAyKzAxOjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgMjUuMSAoV2luZG93cykiLz4gPHJkZjpsaSBzdEV2dDphY3Rpb249InNhdmVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjQ4OWY1NzlmLTJkNjUtZWQ0Zi04OTg0LTA4NGE2MGE1ZTMzNSIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xNVQwMjowNDo1OSswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiBzdEV2dDpjaGFuZ2VkPSIvIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDpiMmM0YTYwOS1iZmE3LWE4NDAtYjhhZS05NzMxNmYzNWRiMjciIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz4OTe6GAAAAx0lEQVR42u3WMQoAIQxFwRzJys77X8vSLiRgITif7bYbgrwYc/mKXyBoY4VVBgsWLFiwYFmOlTv+9jfDOjHmr8u6eVkGCxYsWLBgmc5S8ApewXvgYRksWLBgKXidpeBdloL3wMOCBctgwVLwCl7BuyyDBQsWLFiwTGcpeAWv4D3wsAwWLFiwFLzOUvAuS8F74GHBgmWwYCl4Ba/gXZbBggULFixYprMUvIJX8B54WAYLFixYCl5nKXiXpeA98LBgwTJYsGC9tg1o8f4TTtqzNQAAAABJRU5ErkJggg==", + "CLEAR_BACKGROUND_COLOR": "#040506", + "NODE_TITLE_COLOR": "#999", + "NODE_SELECTED_TITLE_COLOR": "#e5eaf0", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#bcc2c8", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#161b22", + "NODE_DEFAULT_BGCOLOR": "#13171d", + "NODE_DEFAULT_BOXCOLOR": "#30363d", + "NODE_DEFAULT_SHAPE": "box", + 
"NODE_BOX_OUTLINE_COLOR": "#e5eaf0", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 24, + "WIDGET_BGCOLOR": "#161b22", + "WIDGET_OUTLINE_COLOR": "#30363d", + "WIDGET_TEXT_COLOR": "#bcc2c8", + "WIDGET_SECONDARY_TEXT_COLOR": "#999", + "LINK_COLOR": "#9A9", + "EVENT_LINK_COLOR": "#A86", + "CONNECTING_LINK_COLOR": "#AFA" + }, + "comfy_base": { + "fg-color": "#e5eaf0", + "bg-color": "#161b22", + "comfy-menu-bg": "#13171d", + "comfy-input-bg": "#161b22", + "input-text": "#bcc2c8", + "descrip-text": "#999", + "drag-text": "#ccc", + "error-text": "#ff4444", + "border-color": "#30363d", + "tr-even-bg-color": "#161b22", + "tr-odd-bg-color": "#13171d" + } + }, } }; From 57eea0efbb07a48d4810b477b29d44ba5425a742 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 23:45:36 -0500 Subject: [PATCH 097/170] heunpp2 sampler. --- comfy/k_diffusion/sampling.py | 58 +++++++++++++++++++++++++++++++++++ comfy/samplers.py | 2 +- 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index dd6f7bbe5..761c2e0ef 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -750,3 +750,61 @@ def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, n if sigmas[i + 1] > 0: x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1]) return x + + + +@torch.no_grad() +def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.): + # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/ + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + s_end = sigmas[-1] + for i in trange(len(sigmas) - 1, disable=disable): + gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0. 
+ eps = torch.randn_like(x) * s_noise + sigma_hat = sigmas[i] * (gamma + 1) + if gamma > 0: + x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5 + denoised = model(x, sigma_hat * s_in, **extra_args) + d = to_d(x, sigma_hat, denoised) + if callback is not None: + callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised}) + dt = sigmas[i + 1] - sigma_hat + if sigmas[i + 1] == s_end: + # Euler method + x = x + d * dt + elif sigmas[i + 2] == s_end: + + # Heun's method + x_2 = x + d * dt + denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args) + d_2 = to_d(x_2, sigmas[i + 1], denoised_2) + + w = 2 * sigmas[0] + w2 = sigmas[i+1]/w + w1 = 1 - w2 + + d_prime = d * w1 + d_2 * w2 + + + x = x + d_prime * dt + + else: + # Heun++ + x_2 = x + d * dt + denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args) + d_2 = to_d(x_2, sigmas[i + 1], denoised_2) + dt_2 = sigmas[i + 2] - sigmas[i + 1] + + x_3 = x_2 + d_2 * dt_2 + denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args) + d_3 = to_d(x_3, sigmas[i + 2], denoised_3) + + w = 3 * sigmas[0] + w2 = sigmas[i + 1] / w + w3 = sigmas[i + 2] / w + w1 = 1 - w2 - w3 + + d_prime = w1 * d + w2 * d_2 + w3 * d_3 + x = x + d_prime * dt + return x diff --git a/comfy/samplers.py b/comfy/samplers.py index 65c44791d..d8037d8ea 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -518,7 +518,7 @@ class UNIPCBH2(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) -KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", +KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"] From 7114cfec0eefe713340257c85a2b342e98fdcfb2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 15 Nov 2023 15:55:02 -0500 Subject: [PATCH 098/170] Always clone graph data when loading to fix some load issues. --- web/scripts/app.js | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index d22b98c31..4507527f6 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1489,16 +1489,18 @@ export class ComfyApp { let reset_invalid_values = false; if (!graphData) { - if (typeof structuredClone === "undefined") - { - graphData = JSON.parse(JSON.stringify(defaultGraph)); - }else - { - graphData = structuredClone(defaultGraph); - } + graphData = defaultGraph; reset_invalid_values = true; } + if (typeof structuredClone === "undefined") + { + graphData = JSON.parse(JSON.stringify(graphData)); + }else + { + graphData = structuredClone(graphData); + } + const missingNodeTypes = []; for (let n of graphData.nodes) { // Patch T2IAdapterLoader to ControlNetLoader since they are the same node now From dcec1047e6bb04880551a64cdb8f31dbde920ea0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 04:07:35 -0500 Subject: [PATCH 099/170] Invert the start and end percentages in the code. This doesn't affect how percentages behave in the frontend but breaks things if you relied on them in the backend. 
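Concretely, a percent of 0.0 now means the very start of sampling (maximum
noise) and 1.0 the very end. A minimal sketch of how backend code converts a
percent range into a sigma window under the new convention (mirroring the
patch nodes later in this series; `model` is assumed to be a loaded model
patcher):

    ms = model.model.model_sampling
    sigma_start = ms.percent_to_sigma(0.0)   # extremely large sigma: start of sampling
    sigma_end = ms.percent_to_sigma(0.35)    # sigma reached 35% of the way through
    # a patch stays active while sigma_end <= sigma <= sigma_start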
percent_to_sigma goes from 0 to 1.0 instead of 1.0 to 0 for less confusion. Make percent 0 return an extremely large sigma and percent 1.0 return a zero one to fix imprecision. --- comfy/controlnet.py | 4 ++-- comfy/model_sampling.py | 5 +++++ comfy/samplers.py | 2 ++ comfy_extras/nodes_model_advanced.py | 5 +++++ nodes.py | 6 +++--- 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 098681582..433381df6 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -33,7 +33,7 @@ class ControlBase: self.cond_hint_original = None self.cond_hint = None self.strength = 1.0 - self.timestep_percent_range = (1.0, 0.0) + self.timestep_percent_range = (0.0, 1.0) self.timestep_range = None if device is None: @@ -42,7 +42,7 @@ class ControlBase: self.previous_controlnet = None self.global_average_pooling = False - def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)): + def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(0.0, 1.0)): self.cond_hint_original = cond_hint self.strength = strength self.timestep_percent_range = timestep_percent_range diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index a2935d47d..d5b1642ef 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -76,5 +76,10 @@ class ModelSamplingDiscrete(torch.nn.Module): return log_sigma.exp() def percent_to_sigma(self, percent): + if percent <= 0.0: + return torch.tensor(999999999.9) + if percent >= 1.0: + return torch.tensor(0.0) + percent = 1.0 - percent return self.sigma(torch.tensor(percent * 999.0)) diff --git a/comfy/samplers.py b/comfy/samplers.py index d8037d8ea..1d012a514 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -220,6 +220,8 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option transformer_options["patches"] = patches transformer_options["cond_or_uncond"] = cond_or_uncond[:] + transformer_options["sigmas"] = timestep + c['transformer_options'] = transformer_options if 'model_function_wrapper' in model_options: diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 399123eaa..c8c4b4a1e 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -66,6 +66,11 @@ class ModelSamplingDiscreteLCM(torch.nn.Module): return log_sigma.exp() def percent_to_sigma(self, percent): + if percent <= 0.0: + return torch.tensor(999999999.9) + if percent >= 1.0: + return torch.tensor(0.0) + percent = 1.0 - percent return self.sigma(torch.tensor(percent * 999.0)) diff --git a/nodes.py b/nodes.py index 2bbfd8fe8..e8cfb5e6a 100644 --- a/nodes.py +++ b/nodes.py @@ -248,8 +248,8 @@ class ConditioningSetTimestepRange: c = [] for t in conditioning: d = t[1].copy() - d['start_percent'] = 1.0 - start - d['end_percent'] = 1.0 - end + d['start_percent'] = start + d['end_percent'] = end n = [t[0], d] c.append(n) return (c, ) @@ -685,7 +685,7 @@ class ControlNetApplyAdvanced: if prev_cnet in cnets: c_net = cnets[prev_cnet] else: - c_net = control_net.copy().set_cond_hint(control_hint, strength, (1.0 - start_percent, 1.0 - end_percent)) + c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent)) c_net.set_previous_controlnet(prev_cnet) cnets[prev_cnet] = c_net From 7ea6bb038cf488224269565bf0e0bcc400f0a7e2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 12:57:12 -0500 Subject: [PATCH 100/170] Print warning when controlnet can't be applied instead 
of crashing. --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index cac0dfb65..504b79ede 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -255,7 +255,10 @@ def apply_control(h, control, name): if control is not None and name in control and len(control[name]) > 0: ctrl = control[name].pop() if ctrl is not None: - h += ctrl + try: + h += ctrl + except: + print("warning control could not be applied", h.shape, ctrl.shape) return h class UNetModel(nn.Module): From bd07ad1861949007139de7dd5c6bcdb77426919c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 13:23:25 -0500 Subject: [PATCH 101/170] Add PatchModelAddDownscale (Kohya Deep Shrink) node. By adding a downscale to the unet in the first timesteps this node lets you generate images at higher resolutions with less consistency issues. --- comfy_extras/nodes_model_downscale.py | 45 +++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 46 insertions(+) create mode 100644 comfy_extras/nodes_model_downscale.py diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py new file mode 100644 index 000000000..f1b2d3ff2 --- /dev/null +++ b/comfy_extras/nodes_model_downscale.py @@ -0,0 +1,45 @@ +import torch + +class PatchModelAddDownscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "block_number": ("INT", {"default": 3, "min": 1, "max": 32, "step": 1}), + "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, block_number, downscale_factor, start_percent, end_percent): + sigma_start = model.model.model_sampling.percent_to_sigma(start_percent).item() + sigma_end = model.model.model_sampling.percent_to_sigma(end_percent).item() + + def input_block_patch(h, transformer_options): + if transformer_options["block"][1] == block_number: + sigma = transformer_options["sigmas"][0].item() + if sigma <= sigma_start and sigma >= sigma_end: + h = torch.nn.functional.interpolate(h, scale_factor=(1.0 / downscale_factor), mode="bicubic", align_corners=False) + return h + + def output_block_patch(h, hsp, transformer_options): + if h.shape[2] != hsp.shape[2]: + h = torch.nn.functional.interpolate(h, size=(hsp.shape[2], hsp.shape[3]), mode="bicubic", align_corners=False) + return h, hsp + + m = model.clone() + m.set_model_input_block_patch(input_block_patch) + m.set_model_output_block_patch(output_block_patch) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "PatchModelAddDownscale": PatchModelAddDownscale, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + # Sampling + "PatchModelAddDownscale": "PatchModelAddDownscale (Kohya Deep Shrink)", +} diff --git a/nodes.py b/nodes.py index e8cfb5e6a..f9d2d7f6c 100644 --- a/nodes.py +++ b/nodes.py @@ -1799,6 +1799,7 @@ def init_custom_nodes(): "nodes_custom_sampler.py", "nodes_hypertile.py", "nodes_model_advanced.py", + "nodes_model_downscale.py", ] for node_file in extras_files: From 9f00a18095e5f8ef114525bc19db035756501959 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 
14:59:54 -0500 Subject: [PATCH 102/170] Fix potential issues. --- comfy/model_patcher.py | 2 +- comfy/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 023684331..7f5ed45fe 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -37,7 +37,7 @@ class ModelPatcher: return size def clone(self): - n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device) + n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device, weight_inplace_update=self.weight_inplace_update) n.patches = {} for k in self.patches: n.patches[k] = self.patches[k][:] diff --git a/comfy/utils.py b/comfy/utils.py index 1985012e0..f4c0ab419 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -258,7 +258,7 @@ def set_attr(obj, attr, value): for name in attrs[:-1]: obj = getattr(obj, name) prev = getattr(obj, attrs[-1]) - setattr(obj, attrs[-1], torch.nn.Parameter(value)) + setattr(obj, attrs[-1], torch.nn.Parameter(value, requires_grad=False)) del prev def copy_to_param(obj, attr, value): From 7e3fe3ad28fad4dede2893d77093a086344b81b6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 15:26:28 -0500 Subject: [PATCH 103/170] Make deep shrink behave like it should. --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 4 ++++ comfy/model_patcher.py | 3 +++ comfy_extras/nodes_model_downscale.py | 8 ++++++-- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 504b79ede..10eb68d73 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -633,6 +633,10 @@ class UNetModel(nn.Module): h = p(h, transformer_options) hs.append(h) + if "input_block_patch_after_skip" in transformer_patches: + patch = transformer_patches["input_block_patch_after_skip"] + for p in patch: + h = p(h, transformer_options) transformer_options["block"] = ("middle", 0) h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 7f5ed45fe..a3cffc3be 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -99,6 +99,9 @@ class ModelPatcher: def set_model_input_block_patch(self, patch): self.set_model_patch(patch, "input_block_patch") + def set_model_input_block_patch_after_skip(self, patch): + self.set_model_patch(patch, "input_block_patch_after_skip") + def set_model_output_block_patch(self, patch): self.set_model_patch(patch, "output_block_patch") diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index f1b2d3ff2..8850d0948 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -8,13 +8,14 @@ class PatchModelAddDownscale: "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}), "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}), + "downscale_after_skip": ("BOOLEAN", {"default": True}), }} RETURN_TYPES = ("MODEL",) FUNCTION = "patch" CATEGORY = "_for_testing" - def patch(self, model, block_number, downscale_factor, start_percent, end_percent): + def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip): sigma_start = 
model.model.model_sampling.percent_to_sigma(start_percent).item() sigma_end = model.model.model_sampling.percent_to_sigma(end_percent).item() @@ -31,7 +32,10 @@ class PatchModelAddDownscale: return h, hsp m = model.clone() - m.set_model_input_block_patch(input_block_patch) + if downscale_after_skip: + m.set_model_input_block_patch_after_skip(input_block_patch) + else: + m.set_model_input_block_patch(input_block_patch) m.set_model_output_block_patch(output_block_patch) return (m, ) From 107e78b1cb079f652408bece8b0045927dc9f1fd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 23:12:55 -0500 Subject: [PATCH 104/170] Add support for loading SSD1B diffusers unet version. Improve diffusers model detection. --- comfy/model_detection.py | 78 +++++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 4f4e0b3b7..d65d91e7c 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -186,17 +186,24 @@ def convert_config(unet_config): def unet_config_from_diffusers_unet(state_dict, dtype): match = {} - attention_resolutions = [] + transformer_depth = [] attn_res = 1 - for i in range(5): - k = "down_blocks.{}.attentions.1.transformer_blocks.0.attn2.to_k.weight".format(i) - if k in state_dict: - match["context_dim"] = state_dict[k].shape[1] - attention_resolutions.append(attn_res) - attn_res *= 2 + down_blocks = count_blocks(state_dict, "down_blocks.{}") + for i in range(down_blocks): + attn_blocks = count_blocks(state_dict, "down_blocks.{}.attentions.".format(i) + '{}') + for ab in range(attn_blocks): + transformer_count = count_blocks(state_dict, "down_blocks.{}.attentions.{}.transformer_blocks.".format(i, ab) + '{}') + transformer_depth.append(transformer_count) + if transformer_count > 0: + match["context_dim"] = state_dict["down_blocks.{}.attentions.{}.transformer_blocks.0.attn2.to_k.weight".format(i, ab)].shape[1] - match["attention_resolutions"] = attention_resolutions + attn_res *= 2 + if attn_blocks == 0: + transformer_depth.append(0) + transformer_depth.append(0) + + match["transformer_depth"] = transformer_depth match["model_channels"] = state_dict["conv_in.weight"].shape[0] match["in_channels"] = state_dict["conv_in.weight"].shape[1] @@ -208,50 +215,55 @@ def unet_config_from_diffusers_unet(state_dict, dtype): SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2560, 'dtype': dtype, 'in_channels': 4, 'model_channels': 384, - 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 4, 4, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 4, 'use_linear_in_transformer': True, 
'context_dim': 1280, "num_head_channels": 64} + 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [0, 0, 4, 4, 4, 4, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 4, + 'use_linear_in_transformer': True, 'context_dim': 1280, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0]} SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, - 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024, "num_head_channels": 64} + 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], + 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, + 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2048, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024, "num_head_channels": 64} + 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 1536, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024} + 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} - SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, - 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, "num_heads": 8} + SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'adm_in_channels': None, + 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], + 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, 'num_heads': 8, + 'transformer_depth_output': [1, 
1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} SDXL_mid_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [4], 'transformer_depth': [0, 0, 1], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 1, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 1, + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 1, 1, 1]} SDXL_small_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [], 'transformer_depth': [0, 0, 0], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 0, 'use_linear_in_transformer': True, "num_head_channels": 64, 'context_dim': 1} + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 0, 0], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 0, + 'use_linear_in_transformer': True, 'num_head_channels': 64, 'context_dim': 1, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 0, 0, 0]} SDXL_diffusers_inpaint = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} - supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint] + SSD_1B = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 4, 4], 'transformer_depth_output': [0, 0, 0, 1, 1, 2, 10, 4, 4], + 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64} + + supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B] for unet_config in supported_models: matches = True From 0cf4e8693945d68000e37fe291f877eff9ef0aaa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 17 
Nov 2023 02:56:59 -0500 Subject: [PATCH 105/170] Add some command line arguments to store text encoder weights in fp8. Pytorch supports two variants of fp8: --fp8_e4m3fn-text-enc (the one that seems to give better results) --fp8_e5m2-text-enc --- comfy/cli_args.py | 7 +++++++ comfy/model_management.py | 15 +++++++++++++++ comfy/sd.py | 5 +---- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index e79b89c0f..72fce1087 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -62,6 +62,13 @@ fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.") fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.") +fpte_group = parser.add_mutually_exclusive_group() +fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true", help="Store text encoder weights in fp8 (e4m3fn variant).") +fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store text encoder weights in fp8 (e5m2 variant).") +fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.") +fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.") + + parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.") parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize when loading models with Intel GPUs.") diff --git a/comfy/model_management.py b/comfy/model_management.py index be4301aa4..d4acd8950 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -482,6 +482,21 @@ def text_encoder_device(): else: return torch.device("cpu") +def text_encoder_dtype(device=None): + if args.fp8_e4m3fn_text_enc: + return torch.float8_e4m3fn + elif args.fp8_e5m2_text_enc: + return torch.float8_e5m2 + elif args.fp16_text_enc: + return torch.float16 + elif args.fp32_text_enc: + return torch.float32 + + if should_use_fp16(device, prioritize_performance=False): + return torch.float16 + else: + return torch.float32 + def vae_device(): return get_torch_device() diff --git a/comfy/sd.py b/comfy/sd.py index 65d94f46e..c3cc8e720 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -95,10 +95,7 @@ class CLIP: load_device = model_management.text_encoder_device() offload_device = model_management.text_encoder_offload_device() params['device'] = offload_device - if model_management.should_use_fp16(load_device, prioritize_performance=False): - params['dtype'] = torch.float16 - else: - params['dtype'] = torch.float32 + params['dtype'] = model_management.text_encoder_dtype(load_device) self.cond_stage_model = clip(**(params)) From 8a451234b3090db488fbee9740a5f6be2f989253 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 18 Nov 2023 04:44:17 -0500 Subject: [PATCH 106/170] Add ImageCrop node. 
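ComfyUI images are batched tensors laid out as [batch, height, width,
channels], so the crop below reduces to plain tensor slicing. A minimal
standalone sketch of the same operation (hypothetical usage outside the node):

    # image: torch.Tensor of shape [B, H, W, C], values in 0..1
    cropped = image[:, y:y + height, x:x + width, :]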
--- comfy_extras/nodes_images.py | 29 +++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 30 insertions(+) create mode 100644 comfy_extras/nodes_images.py diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py new file mode 100644 index 000000000..2b8e93001 --- /dev/null +++ b/comfy_extras/nodes_images.py @@ -0,0 +1,29 @@ +import nodes +MAX_RESOLUTION = nodes.MAX_RESOLUTION + +class ImageCrop: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": ("IMAGE",), + "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "crop" + + CATEGORY = "image/transform" + + def crop(self, image, width, height, x, y): + x = min(x, image.shape[2] - 1) + y = min(y, image.shape[1] - 1) + to_x = width + x + to_y = height + y + img = image[:,y:to_y, x:to_x, :] + return (img,) + + +NODE_CLASS_MAPPINGS = { + "ImageCrop": ImageCrop, +} diff --git a/nodes.py b/nodes.py index f9d2d7f6c..2adc5e073 100644 --- a/nodes.py +++ b/nodes.py @@ -1800,6 +1800,7 @@ def init_custom_nodes(): "nodes_hypertile.py", "nodes_model_advanced.py", "nodes_model_downscale.py", + "nodes_images.py", ] for node_file in extras_files: From d9d8702d8dd2337c64610633f5df2dcd402379a8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 18 Nov 2023 23:20:29 -0500 Subject: [PATCH 107/170] percent_to_sigma now returns a float instead of a tensor. --- comfy/model_sampling.py | 6 +++--- comfy_extras/nodes_model_advanced.py | 6 +++--- comfy_extras/nodes_model_downscale.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index d5b1642ef..37a3ac725 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -77,9 +77,9 @@ class ModelSamplingDiscrete(torch.nn.Module): def percent_to_sigma(self, percent): if percent <= 0.0: - return torch.tensor(999999999.9) + return 999999999.9 if percent >= 1.0: - return torch.tensor(0.0) + return 0.0 percent = 1.0 - percent - return self.sigma(torch.tensor(percent * 999.0)) + return self.sigma(torch.tensor(percent * 999.0)).item() diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index c8c4b4a1e..0f4ddd9c3 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -67,11 +67,11 @@ class ModelSamplingDiscreteLCM(torch.nn.Module): def percent_to_sigma(self, percent): if percent <= 0.0: - return torch.tensor(999999999.9) + return 999999999.9 if percent >= 1.0: - return torch.tensor(0.0) + return 0.0 percent = 1.0 - percent - return self.sigma(torch.tensor(percent * 999.0)) + return self.sigma(torch.tensor(percent * 999.0)).item() def rescale_zero_terminal_snr_sigmas(sigmas): diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index 8850d0948..f65ef05e1 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -16,8 +16,8 @@ class PatchModelAddDownscale: CATEGORY = "_for_testing" def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip): - sigma_start = model.model.model_sampling.percent_to_sigma(start_percent).item() - sigma_end = model.model.model_sampling.percent_to_sigma(end_percent).item() + 
sigma_start = model.model.model_sampling.percent_to_sigma(start_percent) + sigma_end = model.model.model_sampling.percent_to_sigma(end_percent) def input_block_patch(h, transformer_options): if transformer_options["block"][1] == block_number: From dba4f3b4fce575994ed718ac31888620e8d6e733 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 19 Nov 2023 06:09:01 -0500 Subject: [PATCH 108/170] Add a RepeatImageBatch node. --- comfy_extras/nodes_images.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 2b8e93001..8cb322327 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -23,7 +23,22 @@ class ImageCrop: img = image[:,y:to_y, x:to_x, :] return (img,) +class RepeatImageBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": ("IMAGE",), + "amount": ("INT", {"default": 1, "min": 1, "max": 64}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "repeat" + + CATEGORY = "image/batch" + + def repeat(self, image, amount): + s = image.repeat((amount, 1,1,1)) + return (s,) NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, + "RepeatImageBatch": RepeatImageBatch, } From 31c5ea7b2c79f36d3ebc729acf946ba47b4e5785 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 03:55:51 -0500 Subject: [PATCH 109/170] Add LatentInterpolate to interpolate between latents. --- comfy_extras/nodes_latent.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index 001de39fc..cedf39d63 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -1,4 +1,5 @@ import comfy.utils +import torch def reshape_latent_to(target_shape, latent): if latent.shape[1:] != target_shape[1:]: @@ -67,8 +68,43 @@ class LatentMultiply: samples_out["samples"] = s1 * multiplier return (samples_out,) +class LatentInterpolate: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples1": ("LATENT",), + "samples2": ("LATENT",), + "ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples1, samples2, ratio): + samples_out = samples1.copy() + + s1 = samples1["samples"] + s2 = samples2["samples"] + + s2 = reshape_latent_to(s1.shape, s2) + + m1 = torch.linalg.vector_norm(s1, dim=(1)) + m2 = torch.linalg.vector_norm(s2, dim=(1)) + + s1 = torch.nan_to_num(s1 / m1) + s2 = torch.nan_to_num(s2 / m2) + + t = (s1 * ratio + s2 * (1.0 - ratio)) + mt = torch.linalg.vector_norm(t, dim=(1)) + st = torch.nan_to_num(t / mt) + + samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio)) + return (samples_out,) + NODE_CLASS_MAPPINGS = { "LatentAdd": LatentAdd, "LatentSubtract": LatentSubtract, "LatentMultiply": LatentMultiply, + "LatentInterpolate": LatentInterpolate, } From a03dde190ede39675736e746c3045ecfc4baa79b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 16:38:39 -0500 Subject: [PATCH 110/170] Cap maximum history size at 10000. Delete oldest entry when reached. 
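Python dicts preserve insertion order, so the first key yielded by iteration
is always the oldest history entry; that is what makes the one-line eviction
below correct. A minimal sketch of the pattern:

    MAXIMUM_HISTORY_SIZE = 10000
    if len(history) > MAXIMUM_HISTORY_SIZE:
        history.pop(next(iter(history)))  # evict the oldest (first-inserted) entry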
--- execution.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/execution.py b/execution.py index 918c2bc5c..9a2ca5b9d 100644 --- a/execution.py +++ b/execution.py @@ -681,6 +681,7 @@ def validate_prompt(prompt): return (True, None, list(good_outputs), node_errors) +MAXIMUM_HISTORY_SIZE = 10000 class PromptQueue: def __init__(self, server): @@ -713,6 +714,8 @@ class PromptQueue: def task_done(self, item_id, outputs): with self.mutex: prompt = self.currently_running.pop(item_id) + if len(self.history) > MAXIMUM_HISTORY_SIZE: + self.history.pop(next(iter(self.history))) self.history[prompt[1]] = { "prompt": prompt, "outputs": {} } for o in outputs: self.history[prompt[1]]["outputs"][o] = outputs[o] From 2dd5b4dd78fc0a30f3d5baa0b99a6b10f002d917 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 16:51:41 -0500 Subject: [PATCH 111/170] Only show last 200 elements in the UI history tab. --- execution.py | 14 ++++++++++++-- server.py | 5 ++++- web/scripts/api.js | 2 +- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/execution.py b/execution.py index 9a2ca5b9d..bca48a785 100644 --- a/execution.py +++ b/execution.py @@ -750,10 +750,20 @@ class PromptQueue: return True return False - def get_history(self, prompt_id=None): + def get_history(self, prompt_id=None, max_items=None, offset=-1): with self.mutex: if prompt_id is None: - return copy.deepcopy(self.history) + out = {} + i = 0 + if offset < 0 and max_items is not None: + offset = len(self.history) - max_items + for k in self.history: + if i >= offset: + out[k] = self.history[k] + if max_items is not None and len(out) >= max_items: + break + i += 1 + return out elif prompt_id in self.history: return {prompt_id: copy.deepcopy(self.history[prompt_id])} else: diff --git a/server.py b/server.py index 11bd2a0fb..1a8e92b8f 100644 --- a/server.py +++ b/server.py @@ -431,7 +431,10 @@ class PromptServer(): @routes.get("/history") async def get_history(request): - return web.json_response(self.prompt_queue.get_history()) + max_items = request.rel_url.query.get("max_items", None) + if max_items is not None: + max_items = int(max_items) + return web.json_response(self.prompt_queue.get_history(max_items=max_items)) @routes.get("/history/{prompt_id}") async def get_history(request): diff --git a/web/scripts/api.js b/web/scripts/api.js index b1d245d73..de56b2310 100644 --- a/web/scripts/api.js +++ b/web/scripts/api.js @@ -256,7 +256,7 @@ class ComfyApi extends EventTarget { */ async getHistory() { try { - const res = await this.fetchApi("/history"); + const res = await this.fetchApi("/history?max_items=200"); return { History: Object.values(await res.json()) }; } catch (error) { console.error(error); From ce67dcbcdabe2edf1497e37ecf1b6f976a3ecdf6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 22:27:36 -0500 Subject: [PATCH 112/170] Make it easy for models to process the unet state dict on load. 
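The hook added below runs on the raw unet weights after the prefix is
stripped and before load_state_dict, with a no-op passthrough as the default.
A minimal sketch of how a model config could override it (hypothetical
subclass; state_dict_prefix_replace is the same helper already used for the
CLIP state dict):

    class MyModel(supported_models_base.BASE):
        def process_unet_state_dict(self, state_dict):
            # e.g. adapt keys coming from a foreign checkpoint layout
            return utils.state_dict_prefix_replace(state_dict, {"foreign_prefix.": ""})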
--- comfy/model_base.py | 1 + comfy/supported_models_base.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/comfy/model_base.py b/comfy/model_base.py index 37bf24bb8..772e26934 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -121,6 +121,7 @@ class BaseModel(torch.nn.Module): if k.startswith(unet_prefix): to_load[k[len(unet_prefix):]] = sd.pop(k) + to_load = self.model_config.process_unet_state_dict(to_load) m, u = self.diffusion_model.load_state_dict(to_load, strict=False) if len(m) > 0: print("unet missing:", m) diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 88a1d7fde..6dfae0343 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -53,6 +53,9 @@ class BASE: def process_clip_state_dict(self, state_dict): return state_dict + def process_unet_state_dict(self, state_dict): + return state_dict + def process_clip_state_dict_for_saving(self, state_dict): replace_prefix = {"": "cond_stage_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) From 6ff06fa7960524749d8e584100a0e50594485f29 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 21 Nov 2023 06:33:58 +0000 Subject: [PATCH 113/170] Animated image output support (#2008) * Refactor multiline widget into generic DOM widget * wip webp preview * webp support * fix check * fix sizing * show image when zoomed out * Swap webp checkto generic animated image flag * remove duplicate * Fix falsy check --- web/scripts/app.js | 78 +++++---- web/scripts/domWidget.js | 312 +++++++++++++++++++++++++++++++++ web/scripts/ui/imagePreview.js | 97 ++++++++++ web/scripts/widgets.js | 166 ++---------------- web/style.css | 15 ++ 5 files changed, 482 insertions(+), 186 deletions(-) create mode 100644 web/scripts/domWidget.js create mode 100644 web/scripts/ui/imagePreview.js diff --git a/web/scripts/app.js b/web/scripts/app.js index 4507527f6..601e486e6 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -4,7 +4,10 @@ import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; import { getPngMetadata, getWebpMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; +import { addDomClippingSetting } from "./domWidget.js"; +import { createImageHost, calculateImageGrid } from "./ui/imagePreview.js" +export const ANIM_PREVIEW_WIDGET = "$$comfy_animation_preview" function sanitizeNodeName(string) { let entityMap = { @@ -405,7 +408,9 @@ export class ComfyApp { return shiftY; } - node.prototype.setSizeForImage = function () { + node.prototype.setSizeForImage = function (force) { + if(!force && this.animatedImages) return; + if (this.inputHeight) { this.setSize(this.size); return; @@ -422,13 +427,20 @@ export class ComfyApp { let imagesChanged = false const output = app.nodeOutputs[this.id + ""]; - if (output && output.images) { + if (output?.images) { + this.animatedImages = output?.animated?.find(Boolean); if (this.images !== output.images) { this.images = output.images; imagesChanged = true; - imgURLs = imgURLs.concat(output.images.map(params => { - return api.apiURL("/view?" + new URLSearchParams(params).toString() + app.getPreviewFormatParam()); - })) + imgURLs = imgURLs.concat( + output.images.map((params) => { + return api.apiURL( + "/view?" + + new URLSearchParams(params).toString() + + (this.animatedImages ? 
"" : app.getPreviewFormatParam()) + ); + }) + ); } } @@ -507,7 +519,34 @@ export class ComfyApp { return true; } - if (this.imgs && this.imgs.length) { + if (this.imgs?.length) { + const widgetIdx = this.widgets?.findIndex((w) => w.name === ANIM_PREVIEW_WIDGET); + + if(this.animatedImages) { + // Instead of using the canvas we'll use a IMG + if(widgetIdx > -1) { + // Replace content + const widget = this.widgets[widgetIdx]; + widget.options.host.updateImages(this.imgs); + } else { + const host = createImageHost(this); + this.setSizeForImage(true); + const widget = this.addDOMWidget(ANIM_PREVIEW_WIDGET, "img", host.el, { + host, + getHeight: host.getHeight, + onDraw: host.onDraw, + hideOnZoom: false + }); + widget.serializeValue = () => undefined; + widget.options.host.updateImages(this.imgs); + } + return; + } + + if (widgetIdx > -1) { + this.widgets.splice(widgetIdx, 1); + } + const canvas = app.graph.list_of_graphcanvas[0]; const mouse = canvas.graph_mouse; if (!canvas.pointer_is_down && this.pointerDown) { @@ -547,31 +586,7 @@ export class ComfyApp { } else { cell_padding = 0; - let best = 0; - let w = this.imgs[0].naturalWidth; - let h = this.imgs[0].naturalHeight; - - // compact style - for (let c = 1; c <= numImages; c++) { - const rows = Math.ceil(numImages / c); - const cW = dw / c; - const cH = dh / rows; - const scaleX = cW / w; - const scaleY = cH / h; - - const scale = Math.min(scaleX, scaleY, 1); - const imageW = w * scale; - const imageH = h * scale; - const area = imageW * imageH * numImages; - - if (area > best) { - best = area; - cellWidth = imageW; - cellHeight = imageH; - cols = c; - shiftX = c * ((cW - imageW) / 2); - } - } + ({ cellWidth, cellHeight, cols, shiftX } = calculateImageGrid(this.imgs, dw, dh)); } let anyHovered = false; @@ -1272,6 +1287,7 @@ export class ComfyApp { canvasEl.tabIndex = "1"; document.body.prepend(canvasEl); + addDomClippingSetting(); this.#addProcessMouseHandler(); this.#addProcessKeyHandler(); this.#addConfigureHandler(); diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js new file mode 100644 index 000000000..16f4e192e --- /dev/null +++ b/web/scripts/domWidget.js @@ -0,0 +1,312 @@ +import { app, ANIM_PREVIEW_WIDGET } from "./app.js"; + +const SIZE = Symbol(); + +function intersect(a, b) { + const x = Math.max(a.x, b.x); + const num1 = Math.min(a.x + a.width, b.x + b.width); + const y = Math.max(a.y, b.y); + const num2 = Math.min(a.y + a.height, b.y + b.height); + if (num1 >= x && num2 >= y) return [x, y, num1 - x, num2 - y]; + else return null; +} + +function getClipPath(node, element, elRect) { + const selectedNode = Object.values(app.canvas.selected_nodes)[0]; + if (selectedNode && selectedNode !== node) { + const MARGIN = 7; + const scale = app.canvas.ds.scale; + + const intersection = intersect( + { x: elRect.x / scale, y: elRect.y / scale, width: elRect.width / scale, height: elRect.height / scale }, + { + x: selectedNode.pos[0] + app.canvas.ds.offset[0] - MARGIN, + y: selectedNode.pos[1] + app.canvas.ds.offset[1] - LiteGraph.NODE_TITLE_HEIGHT - MARGIN, + width: selectedNode.size[0] + MARGIN + MARGIN, + height: selectedNode.size[1] + LiteGraph.NODE_TITLE_HEIGHT + MARGIN + MARGIN, + } + ); + + if (!intersection) { + return ""; + } + + const widgetRect = element.getBoundingClientRect(); + const clipX = intersection[0] - widgetRect.x / scale + "px"; + const clipY = intersection[1] - widgetRect.y / scale + "px"; + const clipWidth = intersection[2] + "px"; + const clipHeight = intersection[3] + "px"; + const path = `polygon(0% 
0%, 0% 100%, ${clipX} 100%, ${clipX} ${clipY}, calc(${clipX} + ${clipWidth}) ${clipY}, calc(${clipX} + ${clipWidth}) calc(${clipY} + ${clipHeight}), ${clipX} calc(${clipY} + ${clipHeight}), ${clipX} 100%, 100% 100%, 100% 0%)`; + return path; + } + return ""; +} + +function computeSize(size) { + if (this.widgets?.[0].last_y == null) return; + + let y = this.widgets[0].last_y; + let freeSpace = size[1] - y; + + let widgetHeight = 0; + let dom = []; + for (const w of this.widgets) { + if (w.type === "converted-widget") { + // Ignore + delete w.computedHeight; + } else if (w.computeSize) { + widgetHeight += w.computeSize()[1] + 4; + } else if (w.element) { + // Extract DOM widget size info + const styles = getComputedStyle(w.element); + let minHeight = w.options.getMinHeight?.() ?? parseInt(styles.getPropertyValue("--comfy-widget-min-height")); + let maxHeight = w.options.getMaxHeight?.() ?? parseInt(styles.getPropertyValue("--comfy-widget-max-height")); + + let prefHeight = w.options.getHeight?.() ?? styles.getPropertyValue("--comfy-widget-height"); + if (prefHeight.endsWith?.("%")) { + prefHeight = size[1] * (parseFloat(prefHeight.substring(0, prefHeight.length - 1)) / 100); + } else { + prefHeight = parseInt(prefHeight); + if (isNaN(minHeight)) { + minHeight = prefHeight; + } + } + if (isNaN(minHeight)) { + minHeight = 50; + } + if (!isNaN(maxHeight)) { + if (!isNaN(prefHeight)) { + prefHeight = Math.min(prefHeight, maxHeight); + } else { + prefHeight = maxHeight; + } + } + dom.push({ + minHeight, + prefHeight, + w, + }); + } else { + widgetHeight += LiteGraph.NODE_WIDGET_HEIGHT + 4; + } + } + + freeSpace -= widgetHeight; + + // Calculate sizes with all widgets at their min height + const prefGrow = []; // Nodes that want to grow to their prefd size + const canGrow = []; // Nodes that can grow to auto size + let growBy = 0; + for (const d of dom) { + freeSpace -= d.minHeight; + if (isNaN(d.prefHeight)) { + canGrow.push(d); + d.w.computedHeight = d.minHeight; + } else { + const diff = d.prefHeight - d.minHeight; + if (diff > 0) { + prefGrow.push(d); + growBy += diff; + d.diff = diff; + } else { + d.w.computedHeight = d.minHeight; + } + } + } + + if (this.imgs && !this.widgets.find((w) => w.name === ANIM_PREVIEW_WIDGET)) { + // Allocate space for image + freeSpace -= 220; + } + + if (freeSpace < 0) { + // Not enough space for all widgets so we need to grow + size[1] -= freeSpace; + this.graph.setDirtyCanvas(true); + } else { + // Share the space between each + const growDiff = freeSpace - growBy; + if (growDiff > 0) { + // All pref sizes can be fulfilled + freeSpace = growDiff; + for (const d of prefGrow) { + d.w.computedHeight = d.prefHeight; + } + } else { + // We need to grow evenly + const shared = -growDiff / prefGrow.length; + for (const d of prefGrow) { + d.w.computedHeight = d.prefHeight - shared; + } + freeSpace = 0; + } + + if (freeSpace > 0 && canGrow.length) { + // Grow any that are auto height + const shared = freeSpace / canGrow.length; + for (const d of canGrow) { + d.w.computedHeight += shared; + } + } + } + + // Position each of the widgets + for (const w of this.widgets) { + w.y = y; + if (w.computedHeight) { + y += w.computedHeight; + } else if (w.computeSize) { + y += w.computeSize()[1] + 4; + } else { + y += LiteGraph.NODE_WIDGET_HEIGHT + 4; + } + } +} + +// Override the compute visible nodes function to allow us to hide/show DOM elements when the node goes offscreen +const elementWidgets = new Set(); +const computeVisibleNodes = 
LGraphCanvas.prototype.computeVisibleNodes; +LGraphCanvas.prototype.computeVisibleNodes = function () { + const visibleNodes = computeVisibleNodes.apply(this, arguments); + for (const node of app.graph._nodes) { + if (elementWidgets.has(node)) { + const hidden = visibleNodes.indexOf(node) === -1; + for (const w of node.widgets) { + if (w.element) { + w.element.hidden = hidden; + if (hidden) { + w.options.onHide?.(w); + } + } + } + } + } + + return visibleNodes; +}; + +let enableDomClipping = true; + +export function addDomClippingSetting() { + app.ui.settings.addSetting({ + id: "Comfy.DOMClippingEnabled", + name: "Enable DOM element clipping (enabling may reduce performance)", + type: "boolean", + defaultValue: enableDomClipping, + onChange(value) { + console.log("enableDomClipping", enableDomClipping); + enableDomClipping = !!value; + }, + }); +} + +LGraphNode.prototype.addDOMWidget = function (name, type, element, options) { + options = { hideOnZoom: true, selectOn: ["focus", "click"], ...options }; + + if (!element.parentElement) { + document.body.append(element); + } + + let mouseDownHandler; + if (element.blur) { + mouseDownHandler = (event) => { + if (!element.contains(event.target)) { + element.blur(); + } + }; + document.addEventListener("mousedown", mouseDownHandler); + } + + const widget = { + type, + name, + get value() { + return options.getValue?.() ?? undefined; + }, + set value(v) { + options.setValue?.(v); + widget.callback?.(widget.value); + }, + draw: function (ctx, node, widgetWidth, y, widgetHeight) { + if (widget.computedHeight == null) { + computeSize.call(node, node.size); + } + + const hidden = + (!!options.hideOnZoom && app.canvas.ds.scale < 0.5) || + widget.computedHeight <= 0 || + widget.type === "converted-widget"; + element.hidden = hidden; + element.style.display = hidden ? "none" : null; + if (hidden) { + widget.options.onHide?.(widget); + return; + } + + const margin = 10; + const elRect = ctx.canvas.getBoundingClientRect(); + const transform = new DOMMatrix() + .scaleSelf(elRect.width / ctx.canvas.width, elRect.height / ctx.canvas.height) + .multiplySelf(ctx.getTransform()) + .translateSelf(margin, margin + y); + + const scale = new DOMMatrix().scaleSelf(transform.a, transform.d); + + Object.assign(element.style, { + transformOrigin: "0 0", + transform: scale, + left: `${transform.a + transform.e}px`, + top: `${transform.d + transform.f}px`, + width: `${widgetWidth - margin * 2}px`, + height: `${(widget.computedHeight ?? 
50) - margin * 2}px`, + position: "absolute", + zIndex: app.graph._nodes.indexOf(node), + }); + + if (enableDomClipping) { + element.style.clipPath = getClipPath(node, element, elRect); + element.style.willChange = "clip-path"; + } + + this.options.onDraw?.(widget); + }, + element, + options, + onRemove() { + if (mouseDownHandler) { + document.removeEventListener("mousedown", mouseDownHandler); + } + element.remove(); + }, + }; + + for (const evt of options.selectOn) { + element.addEventListener(evt, () => { + app.canvas.selectNode(this); + app.canvas.bringToFront(this); + }); + } + + this.addCustomWidget(widget); + elementWidgets.add(this); + + const onRemoved = this.onRemoved; + this.onRemoved = function () { + element.remove(); + elementWidgets.delete(this); + onRemoved?.apply(this, arguments); + }; + + if (!this[SIZE]) { + this[SIZE] = true; + const onResize = this.onResize; + this.onResize = function (size) { + options.beforeResize?.call(widget, this); + computeSize.call(this, size); + onResize?.apply(this, arguments); + options.afterResize?.call(widget, this); + }; + } + + return widget; +}; diff --git a/web/scripts/ui/imagePreview.js b/web/scripts/ui/imagePreview.js new file mode 100644 index 000000000..2a7f66b8f --- /dev/null +++ b/web/scripts/ui/imagePreview.js @@ -0,0 +1,97 @@ +import { $el } from "../ui.js"; + +export function calculateImageGrid(imgs, dw, dh) { + let best = 0; + let w = imgs[0].naturalWidth; + let h = imgs[0].naturalHeight; + const numImages = imgs.length; + + let cellWidth, cellHeight, cols, rows, shiftX; + // compact style + for (let c = 1; c <= numImages; c++) { + const r = Math.ceil(numImages / c); + const cW = dw / c; + const cH = dh / r; + const scaleX = cW / w; + const scaleY = cH / h; + + const scale = Math.min(scaleX, scaleY, 1); + const imageW = w * scale; + const imageH = h * scale; + const area = imageW * imageH * numImages; + + if (area > best) { + best = area; + cellWidth = imageW; + cellHeight = imageH; + cols = c; + rows = r; + shiftX = c * ((cW - imageW) / 2); + } + } + + return { cellWidth, cellHeight, cols, rows, shiftX }; +} + +export function createImageHost(node) { + const el = $el("div.comfy-img-preview"); + let currentImgs; + let first = true; + + function updateSize() { + let w = null; + let h = null; + + if (currentImgs) { + let elH = el.clientHeight; + if (first) { + first = false; + // On first run, if we are small then grow a bit + if (elH < 190) { + elH = 190; + } + el.style.setProperty("--comfy-widget-min-height", elH); + } else { + el.style.setProperty("--comfy-widget-min-height", null); + } + + const nw = node.size[0]; + ({ cellWidth: w, cellHeight: h } = calculateImageGrid(currentImgs, nw - 20, elH)); + w += "px"; + h += "px"; + + el.style.setProperty("--comfy-img-preview-width", w); + el.style.setProperty("--comfy-img-preview-height", h); + } + } + return { + el, + updateImages(imgs) { + if (imgs !== currentImgs) { + if (currentImgs == null) { + requestAnimationFrame(() => { + updateSize(); + }); + } + el.replaceChildren(...imgs); + currentImgs = imgs; + node.onResize(node.size); + node.graph.setDirtyCanvas(true, true); + } + }, + getHeight() { + updateSize(); + }, + onDraw() { + // Element from point uses a hittest find elements so we need to toggle pointer events + el.style.pointerEvents = "all"; + const over = document.elementFromPoint(app.canvas.mouse[0], app.canvas.mouse[1]); + el.style.pointerEvents = "none"; + + if(!over) return; + // Set the overIndex so Open Image etc work + const idx = currentImgs.indexOf(over); + 
node.overIndex = idx; + }, + }; +} diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 36bc7ff7f..ccddc0bc4 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -1,4 +1,5 @@ import { api } from "./api.js" +import "./domWidget.js"; function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { let defaultVal = inputData[1]["default"]; @@ -97,166 +98,21 @@ function seedWidget(node, inputName, inputData, app) { seed.widget.linkedWidgets = [seedControl]; return seed; } - -const MultilineSymbol = Symbol(); -const MultilineResizeSymbol = Symbol(); - function addMultilineWidget(node, name, opts, app) { - const MIN_SIZE = 50; + const inputEl = document.createElement("textarea"); + inputEl.className = "comfy-multiline-input"; + inputEl.value = opts.defaultVal; + inputEl.placeholder = opts.placeholder || ""; - function computeSize(size) { - if (node.widgets[0].last_y == null) return; - - let y = node.widgets[0].last_y; - let freeSpace = size[1] - y; - - // Compute the height of all non customtext widgets - let widgetHeight = 0; - const multi = []; - for (let i = 0; i < node.widgets.length; i++) { - const w = node.widgets[i]; - if (w.type === "customtext") { - multi.push(w); - } else { - if (w.computeSize) { - widgetHeight += w.computeSize()[1] + 4; - } else { - widgetHeight += LiteGraph.NODE_WIDGET_HEIGHT + 4; - } - } - } - - // See how large each text input can be - freeSpace -= widgetHeight; - freeSpace /= multi.length + (!!node.imgs?.length); - - if (freeSpace < MIN_SIZE) { - // There isnt enough space for all the widgets, increase the size of the node - freeSpace = MIN_SIZE; - node.size[1] = y + widgetHeight + freeSpace * (multi.length + (!!node.imgs?.length)); - node.graph.setDirtyCanvas(true); - } - - // Position each of the widgets - for (const w of node.widgets) { - w.y = y; - if (w.type === "customtext") { - y += freeSpace; - w.computedHeight = freeSpace - multi.length*4; - } else if (w.computeSize) { - y += w.computeSize()[1] + 4; - } else { - y += LiteGraph.NODE_WIDGET_HEIGHT + 4; - } - } - - node.inputHeight = freeSpace; - } - - const widget = { - type: "customtext", - name, - get value() { - return this.inputEl.value; + const widget = node.addDOMWidget(name, "customtext", inputEl, { + getValue() { + return inputEl.value; }, - set value(x) { - this.inputEl.value = x; + setValue(v) { + inputEl.value = v; }, - draw: function (ctx, _, widgetWidth, y, widgetHeight) { - if (!this.parent.inputHeight) { - // If we are initially offscreen when created we wont have received a resize event - // Calculate it here instead - computeSize(node.size); - } - const visible = app.canvas.ds.scale > 0.5 && this.type === "customtext"; - const margin = 10; - const elRect = ctx.canvas.getBoundingClientRect(); - const transform = new DOMMatrix() - .scaleSelf(elRect.width / ctx.canvas.width, elRect.height / ctx.canvas.height) - .multiplySelf(ctx.getTransform()) - .translateSelf(margin, margin + y); - - const scale = new DOMMatrix().scaleSelf(transform.a, transform.d) - Object.assign(this.inputEl.style, { - transformOrigin: "0 0", - transform: scale, - left: `${transform.a + transform.e}px`, - top: `${transform.d + transform.f}px`, - width: `${widgetWidth - (margin * 2)}px`, - height: `${this.parent.inputHeight - (margin * 2)}px`, - position: "absolute", - background: (!node.color)?'':node.color, - color: (!node.color)?'':'white', - zIndex: app.graph._nodes.indexOf(node), - }); - this.inputEl.hidden = !visible; - }, - }; - widget.inputEl = 
document.createElement("textarea"); - widget.inputEl.className = "comfy-multiline-input"; - widget.inputEl.value = opts.defaultVal; - widget.inputEl.placeholder = opts.placeholder || ""; - document.addEventListener("mousedown", function (event) { - if (!widget.inputEl.contains(event.target)) { - widget.inputEl.blur(); - } }); - widget.parent = node; - document.body.appendChild(widget.inputEl); - - node.addCustomWidget(widget); - - app.canvas.onDrawBackground = function () { - // Draw node isnt fired once the node is off the screen - // if it goes off screen quickly, the input may not be removed - // this shifts it off screen so it can be moved back if the node is visible. - for (let n in app.graph._nodes) { - n = graph._nodes[n]; - for (let w in n.widgets) { - let wid = n.widgets[w]; - if (Object.hasOwn(wid, "inputEl")) { - wid.inputEl.style.left = -8000 + "px"; - wid.inputEl.style.position = "absolute"; - } - } - } - }; - - node.onRemoved = function () { - // When removing this node we need to remove the input from the DOM - for (let y in this.widgets) { - if (this.widgets[y].inputEl) { - this.widgets[y].inputEl.remove(); - } - } - }; - - widget.onRemove = () => { - widget.inputEl?.remove(); - - // Restore original size handler if we are the last - if (!--node[MultilineSymbol]) { - node.onResize = node[MultilineResizeSymbol]; - delete node[MultilineSymbol]; - delete node[MultilineResizeSymbol]; - } - }; - - if (node[MultilineSymbol]) { - node[MultilineSymbol]++; - } else { - node[MultilineSymbol] = 1; - const onResize = (node[MultilineResizeSymbol] = node.onResize); - - node.onResize = function (size) { - computeSize(size); - - // Call original resizer handler - if (onResize) { - onResize.apply(this, arguments); - } - }; - } + widget.inputEl = inputEl; return { minWidth: 400, minHeight: 200, widget }; } diff --git a/web/style.css b/web/style.css index 692fa31d6..378fe0a48 100644 --- a/web/style.css +++ b/web/style.css @@ -409,6 +409,21 @@ dialog::backdrop { width: calc(100% - 10px); } +.comfy-img-preview { + pointer-events: none; + overflow: hidden; + display: flex; + flex-wrap: wrap; + align-content: flex-start; + justify-content: center; +} + +.comfy-img-preview img { + object-fit: contain; + width: var(--comfy-img-preview-width); + height: var(--comfy-img-preview-height); +} + /* Search box */ .litegraph.litesearchbox { From 89e31abc46df00d10d48b8a4e36256fefd5973ed Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 21 Nov 2023 17:54:01 +0000 Subject: [PATCH 114/170] Fix clipping of collapsed nodes --- web/scripts/domWidget.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 16f4e192e..2f73e573e 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -17,13 +17,14 @@ function getClipPath(node, element, elRect) { const MARGIN = 7; const scale = app.canvas.ds.scale; + const bounding = selectedNode.getBounding(); const intersection = intersect( { x: elRect.x / scale, y: elRect.y / scale, width: elRect.width / scale, height: elRect.height / scale }, { x: selectedNode.pos[0] + app.canvas.ds.offset[0] - MARGIN, y: selectedNode.pos[1] + app.canvas.ds.offset[1] - LiteGraph.NODE_TITLE_HEIGHT - MARGIN, - width: selectedNode.size[0] + MARGIN + MARGIN, - height: selectedNode.size[1] + LiteGraph.NODE_TITLE_HEIGHT + MARGIN + MARGIN, + width: bounding[2] + MARGIN + MARGIN, + height: bounding[3] + MARGIN + MARGIN, } ); From 
cd4fc77d5f83867cdfb806f0c96c65ce8a84322c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 21 Nov 2023 12:54:19 -0500 Subject: [PATCH 115/170] Add taesd and taesdxl to VAELoader node. They will show up if both the taesd_encoder and taesd_decoder or taesdxl model files are present in the models/vae_approx directory. --- comfy/sd.py | 17 ++++++++++---- comfy/taesd/taesd.py | 19 +++++++++++---- latent_preview.py | 5 +--- nodes.py | 55 +++++++++++++++++++++++++++++++++++++++++--- 4 files changed, 79 insertions(+), 17 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index c3cc8e720..0f83cc581 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -23,6 +23,7 @@ import comfy.model_patcher import comfy.lora import comfy.t2i_adapter.adapter import comfy.supported_models_base +import comfy.taesd.taesd def load_model_weights(model, sd): m, u = model.load_state_dict(sd, strict=False) @@ -154,10 +155,16 @@ class VAE: if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format sd = diffusers_convert.convert_vae_state_dict(sd) + self.memory_used_encode = lambda shape: (2078 * shape[2] * shape[3]) * 1.7 #These are for AutoencoderKL and need tweaking + self.memory_used_decode = lambda shape: (2562 * shape[2] * shape[3] * 64) * 1.7 + if config is None: - #default SD1.x/SD2.x VAE parameters - ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} - self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4) + if "taesd_decoder.1.weight" in sd: + self.first_stage_model = comfy.taesd.taesd.TAESD() + else: + #default SD1.x/SD2.x VAE parameters + ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} + self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4) else: self.first_stage_model = AutoencoderKL(**(config['params'])) self.first_stage_model = self.first_stage_model.eval() @@ -206,7 +213,7 @@ class VAE: def decode(self, samples_in): self.first_stage_model = self.first_stage_model.to(self.device) try: - memory_used = (2562 * samples_in.shape[2] * samples_in.shape[3] * 64) * 1.7 + memory_used = self.memory_used_decode(samples_in.shape) model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) @@ -234,7 +241,7 @@ class VAE: self.first_stage_model = self.first_stage_model.to(self.device) pixel_samples = pixel_samples.movedim(-1,1) try: - memory_used = (2078 * pixel_samples.shape[2] * pixel_samples.shape[3]) * 1.7 #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change. + memory_used = self.memory_used_encode(pixel_samples.shape) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change. 
model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) diff --git a/comfy/taesd/taesd.py b/comfy/taesd/taesd.py index 8df1f1609..46f3097a2 100644 --- a/comfy/taesd/taesd.py +++ b/comfy/taesd/taesd.py @@ -46,15 +46,16 @@ class TAESD(nn.Module): latent_magnitude = 3 latent_shift = 0.5 - def __init__(self, encoder_path="taesd_encoder.pth", decoder_path="taesd_decoder.pth"): + def __init__(self, encoder_path=None, decoder_path=None): """Initialize pretrained TAESD on the given device from the given checkpoints.""" super().__init__() - self.encoder = Encoder() - self.decoder = Decoder() + self.taesd_encoder = Encoder() + self.taesd_decoder = Decoder() + self.vae_scale = torch.nn.Parameter(torch.tensor(1.0)) if encoder_path is not None: - self.encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True)) + self.taesd_encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True)) if decoder_path is not None: - self.decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True)) + self.taesd_decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True)) @staticmethod def scale_latents(x): @@ -65,3 +66,11 @@ class TAESD(nn.Module): def unscale_latents(x): """[0, 1] -> raw latents""" return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude) + + def decode(self, x): + x_sample = self.taesd_decoder(x * self.vae_scale) + x_sample = x_sample.sub(0.5).mul(2) + return x_sample + + def encode(self, x): + return self.taesd_encoder(x * 0.5 + 0.5) / self.vae_scale diff --git a/latent_preview.py b/latent_preview.py index 6e758a1a9..61754751e 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -22,10 +22,7 @@ class TAESDPreviewerImpl(LatentPreviewer): self.taesd = taesd def decode_latent_to_preview(self, x0): - x_sample = self.taesd.decoder(x0[:1])[0].detach() - # x_sample = self.taesd.unscale_latents(x_sample).div(4).add(0.5) # returns value in [-2, 2] - x_sample = x_sample.sub(0.5).mul(2) - + x_sample = self.taesd.decode(x0[:1])[0].detach() x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) diff --git a/nodes.py b/nodes.py index 2adc5e073..2de468da7 100644 --- a/nodes.py +++ b/nodes.py @@ -573,9 +573,55 @@ class LoraLoader: return (model_lora, clip_lora) class VAELoader: + @staticmethod + def vae_list(): + vaes = folder_paths.get_filename_list("vae") + approx_vaes = folder_paths.get_filename_list("vae_approx") + sdxl_taesd_enc = False + sdxl_taesd_dec = False + sd1_taesd_enc = False + sd1_taesd_dec = False + + for v in approx_vaes: + if v.startswith("taesd_decoder."): + sd1_taesd_dec = True + elif v.startswith("taesd_encoder."): + sd1_taesd_enc = True + elif v.startswith("taesdxl_decoder."): + sdxl_taesd_dec = True + elif v.startswith("taesdxl_encoder."): + sdxl_taesd_enc = True + if sd1_taesd_dec and sd1_taesd_enc: + vaes.append("taesd") + if sdxl_taesd_dec and sdxl_taesd_enc: + vaes.append("taesdxl") + return vaes + + @staticmethod + def load_taesd(name): + sd = {} + approx_vaes = folder_paths.get_filename_list("vae_approx") + + encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes)) + decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes)) + + enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder)) + for k in enc: + sd["taesd_encoder.{}".format(k)] = enc[k] + + dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder)) + for k in dec: + sd["taesd_decoder.{}".format(k)] = dec[k] + + if name == "taesd": + sd["vae_scale"] = torch.tensor(0.18215) + elif name == "taesdxl": + sd["vae_scale"] = torch.tensor(0.13025) + return sd + @classmethod def INPUT_TYPES(s): - return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}} + return {"required": { "vae_name": (s.vae_list(), )}} RETURN_TYPES = ("VAE",) FUNCTION = "load_vae" @@ -583,8 +629,11 @@ class VAELoader: #TODO: scale factor? def load_vae(self, vae_name): - vae_path = folder_paths.get_full_path("vae", vae_name) - sd = comfy.utils.load_torch_file(vae_path) + if vae_name in ["taesd", "taesdxl"]: + sd = self.load_taesd(vae_name) + else: + vae_path = folder_paths.get_full_path("vae", vae_name) + sd = comfy.utils.load_torch_file(vae_path) vae = comfy.sd.VAE(sd=sd) return (vae,) From 6a491ebe2729c675322491e255a72d5ac0ef5bf6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 21 Nov 2023 16:29:18 -0500 Subject: [PATCH 116/170] Allow model config to preprocess the vae state dict on load. 
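The hook is a no-op in BASE; a model config subclass overrides process_vae_state_dict() to massage VAE weights before they reach comfy.sd.VAE. A minimal sketch of how a subclass might use it, assuming a hypothetical checkpoint layout that stores VAE weights under a "vae." prefix (the class name and prefix are illustrative only):

    import comfy.utils
    import comfy.supported_models_base

    class ExampleVAEConfig(comfy.supported_models_base.BASE):
        # A real config also defines unet_config, latent_format, etc.;
        # only the new hook is shown here.
        def process_vae_state_dict(self, state_dict):
            # Strip the hypothetical "vae." prefix so the keys match
            # what AutoencoderKL expects.
            return comfy.utils.state_dict_prefix_replace(state_dict, {"vae.": ""})

load_checkpoint_guess_config() now routes every extracted VAE state dict through this hook before constructing the VAE.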
--- comfy/sd.py | 1 + comfy/supported_models_base.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index 0f83cc581..c006a0362 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -448,6 +448,7 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o if output_vae: vae_sd = comfy.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True) + vae_sd = model_config.process_vae_state_dict(vae_sd) vae = VAE(sd=vae_sd) if output_clip: diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 6dfae0343..b073eb4fc 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -56,6 +56,9 @@ class BASE: def process_unet_state_dict(self, state_dict): return state_dict + def process_vae_state_dict(self, state_dict): + return state_dict + def process_clip_state_dict_for_saving(self, state_dict): replace_prefix = {"": "cond_stage_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) From 72741105a687c67137eb5d7a38840b8373d82e61 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 21 Nov 2023 17:18:49 -0500 Subject: [PATCH 117/170] Remove useless code. --- .../modules/diffusionmodules/openaimodel.py | 31 +++++++------------ 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 10eb68d73..e8f35a540 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -28,25 +28,6 @@ class TimestepBlock(nn.Module): Apply the module to `x` given `emb` timestep embeddings. """ - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, context=None, transformer_options={}, output_shape=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, SpatialTransformer): - x = layer(x, context, transformer_options) - elif isinstance(layer, Upsample): - x = layer(x, output_shape=output_shape) - else: - x = layer(x) - return x - #This is needed because accelerate makes a copy of transformer_options which breaks "current_index" def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None): for layer in ts: @@ -54,13 +35,23 @@ def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, out x = layer(x, emb) elif isinstance(layer, SpatialTransformer): x = layer(x, context, transformer_options) - transformer_options["current_index"] += 1 + if "current_index" in transformer_options: + transformer_options["current_index"] += 1 elif isinstance(layer, Upsample): x = layer(x, output_shape=output_shape) else: x = layer(x) return x +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, *args, **kwargs): + return forward_timestep_embed(self, *args, **kwargs) + class Upsample(nn.Module): """ An upsampling layer with an optional convolution. From c3ae99a749fa1e9a6dbb96c69c65c6fcf2507af3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 22 Nov 2023 03:23:16 -0500 Subject: [PATCH 118/170] Allow controlling downscale and upscale methods in PatchModelAddDownscale. 
--- comfy/utils.py | 6 ++++-- comfy_extras/nodes_model_downscale.py | 10 +++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/comfy/utils.py b/comfy/utils.py index f4c0ab419..294bbb425 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -318,7 +318,9 @@ def bislerp(samples, width, height): coords_2 = torch.nn.functional.interpolate(coords_2, size=(1, length_new), mode="bilinear") coords_2 = coords_2.to(torch.int64) return ratios, coords_1, coords_2 - + + orig_dtype = samples.dtype + samples = samples.float() n,c,h,w = samples.shape h_new, w_new = (height, width) @@ -347,7 +349,7 @@ def bislerp(samples, width, height): result = slerp(pass_1, pass_2, ratios) result = result.reshape(n, h_new, w_new, c).movedim(-1, 1) - return result + return result.to(orig_dtype) def lanczos(samples, width, height): images = [Image.fromarray(np.clip(255. * image.movedim(0, -1).cpu().numpy(), 0, 255).astype(np.uint8)) for image in samples] diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index f65ef05e1..48bcc6892 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -1,6 +1,8 @@ import torch +import comfy.utils class PatchModelAddDownscale: + upscale_methods = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"] @classmethod def INPUT_TYPES(s): return {"required": { "model": ("MODEL",), @@ -9,13 +11,15 @@ class PatchModelAddDownscale: "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}), "downscale_after_skip": ("BOOLEAN", {"default": True}), + "downscale_method": (s.upscale_methods,), + "upscale_method": (s.upscale_methods,), }} RETURN_TYPES = ("MODEL",) FUNCTION = "patch" CATEGORY = "_for_testing" - def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip): + def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method): sigma_start = model.model.model_sampling.percent_to_sigma(start_percent) sigma_end = model.model.model_sampling.percent_to_sigma(end_percent) @@ -23,12 +27,12 @@ class PatchModelAddDownscale: if transformer_options["block"][1] == block_number: sigma = transformer_options["sigmas"][0].item() if sigma <= sigma_start and sigma >= sigma_end: - h = torch.nn.functional.interpolate(h, scale_factor=(1.0 / downscale_factor), mode="bicubic", align_corners=False) + h = comfy.utils.common_upscale(h, round(h.shape[-1] * (1.0 / downscale_factor)), round(h.shape[-2] * (1.0 / downscale_factor)), downscale_method, "disabled") return h def output_block_patch(h, hsp, transformer_options): if h.shape[2] != hsp.shape[2]: - h = torch.nn.functional.interpolate(h, size=(hsp.shape[2], hsp.shape[3]), mode="bicubic", align_corners=False) + h = comfy.utils.common_upscale(h, hsp.shape[-1], hsp.shape[-2], upscale_method, "disabled") return h, hsp m = model.clone() From ab7d4f784892c275e888d71aa80a3a2ed59d9b83 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 22 Nov 2023 13:53:30 +0000 Subject: [PATCH 119/170] Handle collapsing to hide element --- web/scripts/domWidget.js | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 16f4e192e..0f8a2eb01 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -233,6 +233,7 @@ 
LGraphNode.prototype.addDOMWidget = function (name, type, element, options) { } const hidden = + node.flags?.collapsed || (!!options.hideOnZoom && app.canvas.ds.scale < 0.5) || widget.computedHeight <= 0 || widget.type === "converted-widget"; @@ -290,6 +291,15 @@ LGraphNode.prototype.addDOMWidget = function (name, type, element, options) { this.addCustomWidget(widget); elementWidgets.add(this); + const collapse = this.collapse; + this.collapse = function() { + collapse.apply(this, arguments); + if(this.flags?.collapsed) { + element.hidden = true; + element.style.display = "none"; + } + } + const onRemoved = this.onRemoved; this.onRemoved = function () { element.remove(); From 70d2ea0faa28e1727f7535466ac5378e786b32cb Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 22 Nov 2023 17:52:20 +0000 Subject: [PATCH 120/170] Control filter list (#2009) * Add control_filter_list to filter items after queue * fix regex * backwards compatibility * formatting * revert * Add and fix test --- tests-ui/tests/widgetInputs.test.js | 96 ++++++++++++++++++++++++++--- web/extensions/core/widgetInputs.js | 8 ++- web/scripts/widgets.js | 56 ++++++++++++++--- 3 files changed, 141 insertions(+), 19 deletions(-) diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js index 022e54926..e1873105a 100644 --- a/tests-ui/tests/widgetInputs.test.js +++ b/tests-ui/tests/widgetInputs.test.js @@ -14,10 +14,10 @@ const lg = require("../utils/litegraph"); * @param { InstanceType } graph * @param { InstanceType } input * @param { string } widgetType - * @param { boolean } hasControlWidget + * @param { number } controlWidgetCount * @returns */ -async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasControlWidget) { +async function connectPrimitiveAndReload(ez, graph, input, widgetType, controlWidgetCount = 0) { // Connect to primitive and ensure its still connected after let primitive = ez.PrimitiveNode(); primitive.outputs[0].connectTo(input); @@ -33,13 +33,17 @@ async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasContro expect(valueWidget.widget.type).toBe(widgetType); // Check if control_after_generate should be added - if (hasControlWidget) { + if (controlWidgetCount) { const controlWidget = primitive.widgets.control_after_generate; expect(controlWidget.widget.type).toBe("combo"); + if(widgetType === "combo") { + const filterWidget = primitive.widgets.control_filter_list; + expect(filterWidget.widget.type).toBe("string"); + } } // Ensure we dont have other widgets - expect(primitive.node.widgets).toHaveLength(1 + +!!hasControlWidget); + expect(primitive.node.widgets).toHaveLength(1 + controlWidgetCount); }); return primitive; @@ -55,8 +59,8 @@ describe("widget inputs", () => { }); [ - { name: "int", type: "INT", widget: "number", control: true }, - { name: "float", type: "FLOAT", widget: "number", control: true }, + { name: "int", type: "INT", widget: "number", control: 1 }, + { name: "float", type: "FLOAT", widget: "number", control: 1 }, { name: "text", type: "STRING" }, { name: "customtext", @@ -64,7 +68,7 @@ describe("widget inputs", () => { opt: { multiline: true }, }, { name: "toggle", type: "BOOLEAN" }, - { name: "combo", type: ["a", "b", "c"], control: true }, + { name: "combo", type: ["a", "b", "c"], control: 2 }, ].forEach((c) => { test(`widget conversion + primitive works on ${c.name}`, async () => { const { ez, graph } = await start({ @@ -106,7 +110,7 @@ describe("widget inputs", () => 
{ n.widgets.ckpt_name.convertToInput(); expect(n.inputs.length).toEqual(inputCount + 1); - const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", true); + const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", 2); // Disconnect & reconnect primitive.outputs[0].connections[0].disconnect(); @@ -226,7 +230,7 @@ describe("widget inputs", () => { // Reload and ensure it still only has 1 converted widget if (!assertNotNullOrUndefined(input)) return; - await connectPrimitiveAndReload(ez, graph, input, "number", true); + await connectPrimitiveAndReload(ez, graph, input, "number", 1); n = graph.find(n); expect(n.widgets).toHaveLength(1); w = n.widgets.example; @@ -258,7 +262,7 @@ describe("widget inputs", () => { // Reload and ensure it still only has 1 converted widget if (assertNotNullOrUndefined(input)) { - await connectPrimitiveAndReload(ez, graph, input, "number", true); + await connectPrimitiveAndReload(ez, graph, input, "number", 1); n = graph.find(n); expect(n.widgets).toHaveLength(1); expect(n.widgets.example.isConvertedToInput).toBeTruthy(); @@ -316,4 +320,76 @@ describe("widget inputs", () => { n1.outputs[0].connectTo(n2.inputs[0]); expect(() => n1.outputs[0].connectTo(n3.inputs[0])).toThrow(); }); + + test("combo primitive can filter list when control_after_generate called", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", { example: [["A", "B", "C", "D", "AA", "BB", "CC", "DD", "AAA", "BBB"], {}] }), + }, + }); + + const n1 = ez.TestNode1(); + n1.widgets.example.convertToInput(); + const p = ez.PrimitiveNode() + p.outputs[0].connectTo(n1.inputs[0]); + + const value = p.widgets.value; + const control = p.widgets.control_after_generate.widget; + const filter = p.widgets.control_filter_list; + + expect(p.widgets.length).toBe(3); + control.value = "increment"; + expect(value.value).toBe("A"); + + // Manually trigger after queue when set to increment + control["afterQueued"](); + expect(value.value).toBe("B"); + + // Filter to items containing D + filter.value = "D"; + control["afterQueued"](); + expect(value.value).toBe("D"); + control["afterQueued"](); + expect(value.value).toBe("DD"); + + // Check decrement + value.value = "BBB"; + control.value = "decrement"; + filter.value = "B"; + control["afterQueued"](); + expect(value.value).toBe("BB"); + control["afterQueued"](); + expect(value.value).toBe("B"); + + // Check regex works + value.value = "BBB"; + filter.value = "/[AB]|^C$/"; + control["afterQueued"](); + expect(value.value).toBe("AAA"); + control["afterQueued"](); + expect(value.value).toBe("BB"); + control["afterQueued"](); + expect(value.value).toBe("AA"); + control["afterQueued"](); + expect(value.value).toBe("C"); + control["afterQueued"](); + expect(value.value).toBe("B"); + control["afterQueued"](); + expect(value.value).toBe("A"); + + // Check random + control.value = "randomize"; + filter.value = "/D/"; + for(let i = 0; i < 100; i++) { + control["afterQueued"](); + expect(value.value === "D" || value.value === "DD").toBeTruthy(); + } + + // Ensure it doesnt apply when fixed + control.value = "fixed"; + value.value = "B"; + filter.value = "C"; + control["afterQueued"](); + expect(value.value).toBe("B"); + }); }); diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index bad3ac3a7..5c8fbc9b2 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -1,4 +1,4 @@ -import { ComfyWidgets, 
addValueControlWidget } from "../../scripts/widgets.js"; +import { ComfyWidgets, addValueControlWidgets } from "../../scripts/widgets.js"; import { app } from "../../scripts/app.js"; const CONVERTED_TYPE = "converted-widget"; @@ -467,7 +467,11 @@ app.registerExtension({ if (!control_value) { control_value = "fixed"; } - addValueControlWidget(this, widget, control_value); + addValueControlWidgets(this, widget, control_value); + let filter = this.widgets_values?.[2]; + if(filter && this.widgets.length === 3) { + this.widgets[2].value = filter; + } } // When our value changes, update other widgets to reflect our changes diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index ccddc0bc4..fbc1d0fc3 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -24,17 +24,58 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { } export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) { - const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { + const widgets = addValueControlWidgets(node, targetWidget, defaultValue, values, { + addFilterList: false, + }); + return widgets[0]; +} + +export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", values, options) { + if (!options) options = {}; + + const widgets = []; + const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { values: ["fixed", "increment", "decrement", "randomize"], serialize: false, // Don't include this in prompt. }); - valueControl.afterQueued = () => { + widgets.push(valueControl); + const isCombo = targetWidget.type === "combo"; + let comboFilter; + if (isCombo && options.addFilterList !== false) { + comboFilter = node.addWidget("string", "control_filter_list", "", function (v) {}, { + serialize: false, // Don't include this in prompt. 
+ }); + widgets.push(comboFilter); + } + + valueControl.afterQueued = () => { var v = valueControl.value; - if (targetWidget.type == "combo" && v !== "fixed") { - let current_index = targetWidget.options.values.indexOf(targetWidget.value); - let current_length = targetWidget.options.values.length; + if (isCombo && v !== "fixed") { + let values = targetWidget.options.values; + const filter = comboFilter?.value; + if (filter) { + let check; + if (filter.startsWith("/") && filter.endsWith("/")) { + try { + const regex = new RegExp(filter.substring(1, filter.length - 1)); + check = (item) => regex.test(item); + } catch (error) { + console.error("Error constructing RegExp filter for node " + node.id, filter, error); + } + } + if (!check) { + const lower = filter.toLocaleLowerCase(); + check = (item) => item.toLocaleLowerCase().includes(lower); + } + values = values.filter(item => check(item)); + if (!values.length && targetWidget.options.values.length) { + console.warn("Filter for node " + node.id + " has filtered out all items", filter); + } + } + let current_index = values.indexOf(targetWidget.value); + let current_length = values.length; switch (v) { case "increment": @@ -51,7 +92,7 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random current_index = Math.max(0, current_index); current_index = Math.min(current_length - 1, current_index); if (current_index >= 0) { - let value = targetWidget.options.values[current_index]; + let value = values[current_index]; targetWidget.value = value; targetWidget.callback(value); } @@ -88,7 +129,8 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random targetWidget.callback(targetWidget.value); } } - return valueControl; + + return widgets; }; function seedWidget(node, inputName, inputData, app) { From 32447f0c392be6a6b64fbac09fd7e7f33eb451f8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 22 Nov 2023 17:23:37 -0500 Subject: [PATCH 121/170] Add sampling_settings so models can specify specific sampling settings. --- comfy/model_sampling.py | 2 +- comfy/supported_models_base.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index 37a3ac725..9e2a1c1af 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -24,7 +24,7 @@ class ModelSamplingDiscrete(torch.nn.Module): super().__init__() beta_schedule = "linear" if model_config is not None: - beta_schedule = model_config.beta_schedule + beta_schedule = model_config.sampling_settings.get("beta_schedule", beta_schedule) self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) self.sigma_data = 1.0 diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index b073eb4fc..3412cfea0 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -19,7 +19,7 @@ class BASE: clip_prefix = [] clip_vision_prefix = None noise_aug_config = None - beta_schedule = "linear" + sampling_settings = {} latent_format = latent_formats.LatentFormat @classmethod From 410bf0777197c7005fe13aa4f6717d6dc63e2b22 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 22 Nov 2023 18:16:02 -0500 Subject: [PATCH 122/170] Make VAE memory estimation take dtype into account. 
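Using model_management.dtype_size() makes the estimate scale with the element width of the VAE dtype instead of assuming a fixed size. A quick back-of-envelope check with the new decode constant (the latent shape and resulting numbers are illustrative only):

    import torch
    import comfy.model_management as mm

    def memory_used_decode(shape, dtype):
        # Same formula the VAE now uses for AutoencoderKL decodes.
        return (2178 * shape[2] * shape[3] * 64) * mm.dtype_size(dtype)

    latent = (1, 4, 64, 64)  # decodes to a 512x512 image
    fp16 = memory_used_decode(latent, torch.float16)  # ~1.1 GB
    fp32 = memory_used_decode(latent, torch.float32)  # ~2.3 GB
    assert fp32 == 2 * fp16  # fp32 elements are twice as wide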
--- comfy/sd.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index c006a0362..a8df3bdd4 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -155,8 +155,8 @@ class VAE: if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format sd = diffusers_convert.convert_vae_state_dict(sd) - self.memory_used_encode = lambda shape: (2078 * shape[2] * shape[3]) * 1.7 #These are for AutoencoderKL and need tweaking - self.memory_used_decode = lambda shape: (2562 * shape[2] * shape[3] * 64) * 1.7 + self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower) + self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) if config is None: if "taesd_decoder.1.weight" in sd: @@ -213,7 +213,7 @@ class VAE: def decode(self, samples_in): self.first_stage_model = self.first_stage_model.to(self.device) try: - memory_used = self.memory_used_decode(samples_in.shape) + memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype) model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) @@ -241,7 +241,7 @@ class VAE: self.first_stage_model = self.first_stage_model.to(self.device) pixel_samples = pixel_samples.movedim(-1,1) try: - memory_used = self.memory_used_encode(pixel_samples.shape) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change. + memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype) model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) From d03d8aa2e348c6ba3333150eb18aa76f5180a7f0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 01:09:15 -0500 Subject: [PATCH 123/170] Fix loading groups. --- web/lib/litegraph.core.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 0ca203842..f571edb30 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -4928,7 +4928,9 @@ LGraphNode.prototype.executeAction = function(action) this.title = o.title; this._bounding.set(o.bounding); this.color = o.color; - this.font_size = o.font_size; + if (o.font_size) { + this.font_size = o.font_size; + } }; LGraphGroup.prototype.serialize = function() { From 87031a1945278abe6b8a8058dfe6f38a5138655c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 11:59:11 -0500 Subject: [PATCH 124/170] Update readme with link to LCM example page. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d622c9072..f87c0404f 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin - [unCLIP Models](https://comfyanonymous.github.io/ComfyUI_examples/unclip/) - [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/) - [Model Merging](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/) +- [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) - Latent previews with [TAESD](#how-to-show-high-quality-previews) - Starts up very fast. - Works fully offline: will never download anything. 
From a657f96c5cd9d72725352d6b00def82d9ce5d556 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 13:55:29 -0500 Subject: [PATCH 125/170] Add a node to save animated webp. --- comfy_extras/nodes_images.py | 76 ++++++++++++++++++++++++++++++++++++ web/scripts/pnginfo.js | 4 +- 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 8cb322327..18c579190 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -1,4 +1,12 @@ import nodes +import folder_paths +from comfy.cli_args import args + +from PIL import Image +import numpy as np +import json +import os + MAX_RESOLUTION = nodes.MAX_RESOLUTION class ImageCrop: @@ -38,7 +46,75 @@ class RepeatImageBatch: s = image.repeat((amount, 1,1,1)) return (s,) +class SaveAnimatedWEBP: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + methods = {"default": 4, "fastest": 0, "slowest": 6} + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"}), + "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + "lossless": ("BOOLEAN", {"default": True}), + "quality": ("INT", {"default": 80, "min": 0, "max": 100}), + "method": (list(s.methods.keys()),), + # "num_frames": ("INT", {"default": 0, "min": 0, "max": 8192}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images" + + OUTPUT_NODE = True + + CATEGORY = "_for_testing" + + def save_images(self, images, fps, filename_prefix, lossless, quality, method, num_frames=0, prompt=None, extra_pnginfo=None): + method = self.methods.get(method, "aoeu") + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + pil_images = [] + for image in images: + i = 255. 
* image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + + metadata = None + if not args.disable_metadata: + metadata = pil_images[0].getexif() + if prompt is not None: + metadata[0x0110] = "prompt:{}".format(json.dumps(prompt)) + if extra_pnginfo is not None: + inital_exif = 0x010f + for x in extra_pnginfo: + metadata[inital_exif] = "{}:{}".format(x, json.dumps(extra_pnginfo[x])) + inital_exif -= 1 + + if num_frames == 0: + num_frames = len(pil_images) + + c = len(pil_images) + for i in range(0, c, num_frames): + file = f"{filename}_{counter:05}_.webp" + pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], exif=metadata, lossless=lossless, quality=quality, method=method) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + counter += 1 + + animated = num_frames != 1 + return { "ui": { "images": results, "animated": (animated,) } } + NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, + "SaveAnimatedWEBP": SaveAnimatedWEBP, } diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index 491caed79..f8cbe7a3c 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -50,7 +50,6 @@ export function getPngMetadata(file) { function parseExifData(exifData) { // Check for the correct TIFF header (0x4949 for little-endian or 0x4D4D for big-endian) const isLittleEndian = new Uint16Array(exifData.slice(0, 2))[0] === 0x4949; - console.log(exifData); // Function to read 16-bit and 32-bit integers from binary data function readInt(offset, isLittleEndian, length) { @@ -126,6 +125,9 @@ export function getWebpMetadata(file) { const chunk_length = dataView.getUint32(offset + 4, true); const chunk_type = String.fromCharCode(...webp.slice(offset, offset + 4)); if (chunk_type === "EXIF") { + if (String.fromCharCode(...webp.slice(offset + 8, offset + 8 + 6)) == "Exif\0\0") { + offset += 6; + } let data = parseExifData(webp.slice(offset + 8, offset + 8 + chunk_length)); for (var key in data) { var value = data[key]; From 4d2437e68165cf12989dafe1ef0a26c3a0abc7f5 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 23 Nov 2023 19:43:55 +0000 Subject: [PATCH 126/170] Call widget onRemove to remove element --- web/scripts/app.js | 1 + 1 file changed, 1 insertion(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index 601e486e6..180416ef9 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -544,6 +544,7 @@ export class ComfyApp { } if (widgetIdx > -1) { + this.widgets[widgetIdx].onRemove?.(); this.widgets.splice(widgetIdx, 1); } From 022033a0e75901c7c357ab96e1c804fd5da05770 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 15:06:35 -0500 Subject: [PATCH 127/170] Fix SaveAnimatedWEBP not working when metadata is disabled. 
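The previous version left metadata as None when --disable-metadata was set and then handed that None to PIL's save() via exif=, which is what broke the node. The fix always builds the Exif container from the first frame and only populates it when metadata is enabled. A minimal sketch of the corrected pattern (the file name and tag value are illustrative):

    from PIL import Image

    disable_metadata = True  # stand-in for args.disable_metadata
    img = Image.new("RGB", (64, 64))

    metadata = img.getexif()            # empty Exif container, never None
    if not disable_metadata:
        metadata[0x0110] = "prompt:{}"  # same tag id the node writes

    # An empty Exif container serializes cleanly where a bare None did not.
    img.save("frame.webp", exif=metadata, lossless=True, quality=80, method=4)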
--- comfy_extras/nodes_images.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 18c579190..8c6ae5387 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -75,7 +75,7 @@ class SaveAnimatedWEBP: CATEGORY = "_for_testing" def save_images(self, images, fps, filename_prefix, lossless, quality, method, num_frames=0, prompt=None, extra_pnginfo=None): - method = self.methods.get(method, "aoeu") + method = self.methods.get(method) filename_prefix += self.prefix_append full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) results = list() @@ -85,9 +85,8 @@ class SaveAnimatedWEBP: img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) pil_images.append(img) - metadata = None + metadata = pil_images[0].getexif() if not args.disable_metadata: - metadata = pil_images[0].getexif() if prompt is not None: metadata[0x0110] = "prompt:{}".format(json.dumps(prompt)) if extra_pnginfo is not None: From 1964bf1e78dda9c6c7cf1b561068b835639aa166 Mon Sep 17 00:00:00 2001 From: Enrico Fasoli Date: Thu, 23 Nov 2023 22:24:58 +0100 Subject: [PATCH 128/170] fix: folder handling issues --- folder_paths.py | 5 ++++- nodes.py | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/folder_paths.py b/folder_paths.py index 4a38deec0..7046255e4 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -38,7 +38,10 @@ input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "inp filename_list_cache = {} if not os.path.exists(input_directory): - os.makedirs(input_directory) + try: + os.makedirs(input_directory) + except: + print("Failed to create input directory") def set_output_directory(output_dir): global output_directory diff --git a/nodes.py b/nodes.py index 2de468da7..27b8b1c1b 100644 --- a/nodes.py +++ b/nodes.py @@ -1808,7 +1808,7 @@ def load_custom_nodes(): node_paths = folder_paths.get_folder_paths("custom_nodes") node_import_times = [] for custom_node_path in node_paths: - possible_modules = os.listdir(custom_node_path) + possible_modules = os.listdir(os.path.realpath(custom_node_path)) if "__pycache__" in possible_modules: possible_modules.remove("__pycache__") From 871cc20e13e9ef2629e3b5faa6af64207e86d6d2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 19:41:33 -0500 Subject: [PATCH 129/170] Support SVD img2vid model. 
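The bulk of this change teaches the UNet attention and resnet blocks to run a second, temporal pass over video frames (SpatialVideoTransformer, VideoResBlock, AlphaBlender). The key layout trick: spatial blocks see per-frame tokens as (b*t, s, c), then the same tensor is regrouped so the time_stack attends across the t frames at each spatial location, and the AlphaBlender time_mixer merges the spatial and temporal results. A shape-only sketch of that regrouping (all dimensions are illustrative):

    import torch
    from einops import rearrange

    b, t, s, c = 2, 14, 64, 320   # batch, video frames, tokens, channels
    x = torch.randn(b * t, s, c)  # layout used by the spatial blocks

    # group by spatial position so attention runs over the time axis
    x_mix = rearrange(x, "(b t) s c -> (b s) t c", t=t)
    # ... a temporal BasicTransformerBlock would run here ...
    x_mix = rearrange(x_mix, "(b s) t c -> (b t) s c", s=s, b=b, t=t)

    assert x_mix.shape == x.shape  # back to the spatial layout for merging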
--- comfy/cldm/cldm.py | 1 + comfy/ldm/modules/attention.py | 271 ++++++++++++-- .../modules/diffusionmodules/openaimodel.py | 348 +++++++++++++++--- comfy/ldm/modules/diffusionmodules/util.py | 69 +++- comfy/ldm/modules/temporal_ae.py | 244 ++++++++++++ comfy/model_base.py | 56 ++- comfy/model_detection.py | 18 +- comfy/model_sampling.py | 46 ++- comfy/sd.py | 10 +- comfy/supported_models.py | 36 +- comfy_extras/nodes_model_advanced.py | 31 ++ 11 files changed, 1030 insertions(+), 100 deletions(-) create mode 100644 comfy/ldm/modules/temporal_ae.py diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 9a63202ab..76a525b37 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -54,6 +54,7 @@ class ControlNet(nn.Module): transformer_depth_output=None, device=None, operations=comfy.ops, + **kwargs, ): super().__init__() assert use_spatial_transformer == True, "use_spatial_transformer has to be true" diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 016795a59..947e2008c 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -5,8 +5,10 @@ import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat from typing import Optional, Any +from functools import partial -from .diffusionmodules.util import checkpoint + +from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding from .sub_quadratic_attention import efficient_dot_product_attention from comfy import model_management @@ -370,21 +372,45 @@ class CrossAttention(nn.Module): class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, - disable_self_attn=False, dtype=None, device=None, operations=comfy.ops): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None, + disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=comfy.ops): super().__init__() + + self.ff_in = ff_in or inner_dim is not None + if inner_dim is None: + inner_dim = dim + + self.is_res = inner_dim == dim + + if self.ff_in: + self.norm_in = nn.LayerNorm(dim, dtype=dtype, device=device) + self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) + self.disable_self_attn = disable_self_attn - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, + self.attn1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout, context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations) # is a self-attention if not self.disable_self_attn - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device) - self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device) - self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device) + self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) + + if disable_temporal_crossattention: + if switch_temporal_ca_to_sa: + raise 
ValueError + else: + self.attn2 = None + else: + context_dim_attn2 = None + if not switch_temporal_ca_to_sa: + context_dim_attn2 = context_dim + + self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2, + heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none + self.norm2 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) + + self.norm1 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) + self.norm3 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) self.checkpoint = checkpoint self.n_heads = n_heads self.d_head = d_head + self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa def forward(self, x, context=None, transformer_options={}): return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint) @@ -418,6 +444,12 @@ class BasicTransformerBlock(nn.Module): else: transformer_patches_replace = {} + if self.ff_in: + x_skip = x + x = self.ff_in(self.norm_in(x)) + if self.is_res: + x += x_skip + n = self.norm1(x) if self.disable_self_attn: context_attn1 = context @@ -465,31 +497,34 @@ class BasicTransformerBlock(nn.Module): for p in patch: x = p(x, extra_options) - n = self.norm2(x) - - context_attn2 = context - value_attn2 = None - if "attn2_patch" in transformer_patches: - patch = transformer_patches["attn2_patch"] - value_attn2 = context_attn2 - for p in patch: - n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options) - - attn2_replace_patch = transformer_patches_replace.get("attn2", {}) - block_attn2 = transformer_block - if block_attn2 not in attn2_replace_patch: - block_attn2 = block - - if block_attn2 in attn2_replace_patch: - if value_attn2 is None: + if self.attn2 is not None: + n = self.norm2(x) + if self.switch_temporal_ca_to_sa: + context_attn2 = n + else: + context_attn2 = context + value_attn2 = None + if "attn2_patch" in transformer_patches: + patch = transformer_patches["attn2_patch"] value_attn2 = context_attn2 - n = self.attn2.to_q(n) - context_attn2 = self.attn2.to_k(context_attn2) - value_attn2 = self.attn2.to_v(value_attn2) - n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options) - n = self.attn2.to_out(n) - else: - n = self.attn2(n, context=context_attn2, value=value_attn2) + for p in patch: + n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options) + + attn2_replace_patch = transformer_patches_replace.get("attn2", {}) + block_attn2 = transformer_block + if block_attn2 not in attn2_replace_patch: + block_attn2 = block + + if block_attn2 in attn2_replace_patch: + if value_attn2 is None: + value_attn2 = context_attn2 + n = self.attn2.to_q(n) + context_attn2 = self.attn2.to_k(context_attn2) + value_attn2 = self.attn2.to_v(value_attn2) + n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options) + n = self.attn2.to_out(n) + else: + n = self.attn2(n, context=context_attn2, value=value_attn2) if "attn2_output_patch" in transformer_patches: patch = transformer_patches["attn2_output_patch"] @@ -497,7 +532,12 @@ class BasicTransformerBlock(nn.Module): n = p(n, extra_options) x += n - x = self.ff(self.norm3(x)) + x + if self.is_res: + x_skip = x + x = self.ff(self.norm3(x)) + if self.is_res: + x += x_skip + return x @@ -565,3 +605,164 @@ class SpatialTransformer(nn.Module): x = self.proj_out(x) return x + x_in + +class SpatialVideoTransformer(SpatialTransformer): + def __init__( + self, + in_channels, + n_heads, + d_head, + depth=1, 
+ dropout=0.0, + use_linear=False, + context_dim=None, + use_spatial_context=False, + timesteps=None, + merge_strategy: str = "fixed", + merge_factor: float = 0.5, + time_context_dim=None, + ff_in=False, + checkpoint=False, + time_depth=1, + disable_self_attn=False, + disable_temporal_crossattention=False, + max_time_embed_period: int = 10000, + dtype=None, device=None, operations=comfy.ops + ): + super().__init__( + in_channels, + n_heads, + d_head, + depth=depth, + dropout=dropout, + use_checkpoint=checkpoint, + context_dim=context_dim, + use_linear=use_linear, + disable_self_attn=disable_self_attn, + dtype=dtype, device=device, operations=operations + ) + self.time_depth = time_depth + self.depth = depth + self.max_time_embed_period = max_time_embed_period + + time_mix_d_head = d_head + n_time_mix_heads = n_heads + + time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads) + + inner_dim = n_heads * d_head + if use_spatial_context: + time_context_dim = context_dim + + self.time_stack = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + n_time_mix_heads, + time_mix_d_head, + dropout=dropout, + context_dim=time_context_dim, + # timesteps=timesteps, + checkpoint=checkpoint, + ff_in=ff_in, + inner_dim=time_mix_inner_dim, + disable_self_attn=disable_self_attn, + disable_temporal_crossattention=disable_temporal_crossattention, + dtype=dtype, device=device, operations=operations + ) + for _ in range(self.depth) + ] + ) + + assert len(self.time_stack) == len(self.transformer_blocks) + + self.use_spatial_context = use_spatial_context + self.in_channels = in_channels + + time_embed_dim = self.in_channels * 4 + self.time_pos_embed = nn.Sequential( + operations.Linear(self.in_channels, time_embed_dim, dtype=dtype, device=device), + nn.SiLU(), + operations.Linear(time_embed_dim, self.in_channels, dtype=dtype, device=device), + ) + + self.time_mixer = AlphaBlender( + alpha=merge_factor, merge_strategy=merge_strategy + ) + + def forward( + self, + x: torch.Tensor, + context: Optional[torch.Tensor] = None, + time_context: Optional[torch.Tensor] = None, + timesteps: Optional[int] = None, + image_only_indicator: Optional[torch.Tensor] = None, + transformer_options={} + ) -> torch.Tensor: + _, _, h, w = x.shape + x_in = x + spatial_context = None + if exists(context): + spatial_context = context + + if self.use_spatial_context: + assert ( + context.ndim == 3 + ), f"n dims of spatial context should be 3 but are {context.ndim}" + + if time_context is None: + time_context = context + time_context_first_timestep = time_context[::timesteps] + time_context = repeat( + time_context_first_timestep, "b ... -> (b n) ...", n=h * w + ) + elif time_context is not None and not self.use_spatial_context: + time_context = repeat(time_context, "b ... 
-> (b n) ...", n=h * w) + if time_context.ndim == 2: + time_context = rearrange(time_context, "b c -> b 1 c") + + x = self.norm(x) + if not self.use_linear: + x = self.proj_in(x) + x = rearrange(x, "b c h w -> b (h w) c") + if self.use_linear: + x = self.proj_in(x) + + num_frames = torch.arange(timesteps, device=x.device) + num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps) + num_frames = rearrange(num_frames, "b t -> (b t)") + t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False, max_period=self.max_time_embed_period).to(x.dtype) + emb = self.time_pos_embed(t_emb) + emb = emb[:, None, :] + + for it_, (block, mix_block) in enumerate( + zip(self.transformer_blocks, self.time_stack) + ): + transformer_options["block_index"] = it_ + x = block( + x, + context=spatial_context, + transformer_options=transformer_options, + ) + + x_mix = x + x_mix = x_mix + emb + + B, S, C = x_mix.shape + x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps) + x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options + x_mix = rearrange( + x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps + ) + + x = self.time_mixer(x_spatial=x, x_temporal=x_mix, image_only_indicator=image_only_indicator) + + if self.use_linear: + x = self.proj_out(x) + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w) + if not self.use_linear: + x = self.proj_out(x) + out = x + x_in + return out + + diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index e8f35a540..a497ed344 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -5,6 +5,8 @@ import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F +from einops import rearrange +from functools import partial from .util import ( checkpoint, @@ -12,8 +14,9 @@ from .util import ( zero_module, normalization, timestep_embedding, + AlphaBlender, ) -from ..attention import SpatialTransformer +from ..attention import SpatialTransformer, SpatialVideoTransformer, default from comfy.ldm.util import exists import comfy.ops @@ -29,10 +32,15 @@ class TimestepBlock(nn.Module): """ #This is needed because accelerate makes a copy of transformer_options which breaks "current_index" -def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None): +def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None, time_context=None, num_video_frames=None, image_only_indicator=None): for layer in ts: - if isinstance(layer, TimestepBlock): + if isinstance(layer, VideoResBlock): + x = layer(x, emb, num_video_frames, image_only_indicator) + elif isinstance(layer, TimestepBlock): x = layer(x, emb) + elif isinstance(layer, SpatialVideoTransformer): + x = layer(x, context, time_context, num_video_frames, image_only_indicator, transformer_options) + transformer_options["current_index"] += 1 elif isinstance(layer, SpatialTransformer): x = layer(x, context, transformer_options) if "current_index" in transformer_options: @@ -145,6 +153,9 @@ class ResBlock(TimestepBlock): use_checkpoint=False, up=False, down=False, + kernel_size=3, + exchange_temb_dims=False, + skip_t_emb=False, dtype=None, device=None, operations=comfy.ops @@ -157,11 +168,17 @@ class ResBlock(TimestepBlock): self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm + self.exchange_temb_dims = 
exchange_temb_dims + + if isinstance(kernel_size, list): + padding = [k // 2 for k in kernel_size] + else: + padding = kernel_size // 2 self.in_layers = nn.Sequential( nn.GroupNorm(32, channels, dtype=dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device), + operations.conv_nd(dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device), ) self.updown = up or down @@ -175,19 +192,24 @@ class ResBlock(TimestepBlock): else: self.h_upd = self.x_upd = nn.Identity() - self.emb_layers = nn.Sequential( - nn.SiLU(), - operations.Linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device - ), - ) + self.skip_t_emb = skip_t_emb + if self.skip_t_emb: + self.emb_layers = None + self.exchange_temb_dims = False + else: + self.emb_layers = nn.Sequential( + nn.SiLU(), + operations.Linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device + ), + ) self.out_layers = nn.Sequential( nn.GroupNorm(32, self.out_channels, dtype=dtype, device=device), nn.SiLU(), nn.Dropout(p=dropout), zero_module( - operations.conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1, dtype=dtype, device=device) + operations.conv_nd(dims, self.out_channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device) ), ) @@ -195,7 +217,7 @@ class ResBlock(TimestepBlock): self.skip_connection = nn.Identity() elif use_conv: self.skip_connection = operations.conv_nd( - dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device + dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device ) else: self.skip_connection = operations.conv_nd(dims, channels, self.out_channels, 1, dtype=dtype, device=device) @@ -221,19 +243,110 @@ class ResBlock(TimestepBlock): h = in_conv(h) else: h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] + + emb_out = None + if not self.skip_t_emb: + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift + h = out_norm(h) + if emb_out is not None: + scale, shift = th.chunk(emb_out, 2, dim=1) + h *= (1 + scale) + h += shift h = out_rest(h) else: - h = h + emb_out + if emb_out is not None: + if self.exchange_temb_dims: + emb_out = rearrange(emb_out, "b t c ... 
-> b c t ...") + h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h + +class VideoResBlock(ResBlock): + def __init__( + self, + channels: int, + emb_channels: int, + dropout: float, + video_kernel_size=3, + merge_strategy: str = "fixed", + merge_factor: float = 0.5, + out_channels=None, + use_conv: bool = False, + use_scale_shift_norm: bool = False, + dims: int = 2, + use_checkpoint: bool = False, + up: bool = False, + down: bool = False, + dtype=None, + device=None, + operations=comfy.ops + ): + super().__init__( + channels, + emb_channels, + dropout, + out_channels=out_channels, + use_conv=use_conv, + use_scale_shift_norm=use_scale_shift_norm, + dims=dims, + use_checkpoint=use_checkpoint, + up=up, + down=down, + dtype=dtype, + device=device, + operations=operations + ) + + self.time_stack = ResBlock( + default(out_channels, channels), + emb_channels, + dropout=dropout, + dims=3, + out_channels=default(out_channels, channels), + use_scale_shift_norm=False, + use_conv=False, + up=False, + down=False, + kernel_size=video_kernel_size, + use_checkpoint=use_checkpoint, + exchange_temb_dims=True, + dtype=dtype, + device=device, + operations=operations + ) + self.time_mixer = AlphaBlender( + alpha=merge_factor, + merge_strategy=merge_strategy, + rearrange_pattern="b t -> b 1 t 1 1", + ) + + def forward( + self, + x: th.Tensor, + emb: th.Tensor, + num_video_frames: int, + image_only_indicator = None, + ) -> th.Tensor: + x = super().forward(x, emb) + + x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames) + x = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames) + + x = self.time_stack( + x, rearrange(emb, "(b t) ... -> b t ...", t=num_video_frames) + ) + x = self.time_mixer( + x_spatial=x_mix, x_temporal=x, image_only_indicator=image_only_indicator + ) + x = rearrange(x, "b c t h w -> (b t) c h w") + return x + + class Timestep(nn.Module): def __init__(self, dim): super().__init__() @@ -310,6 +423,16 @@ class UNetModel(nn.Module): adm_in_channels=None, transformer_depth_middle=None, transformer_depth_output=None, + use_temporal_resblock=False, + use_temporal_attention=False, + time_context_dim=None, + extra_ff_mix_layer=False, + use_spatial_context=False, + merge_strategy=None, + merge_factor=0.0, + video_kernel_size=None, + disable_temporal_crossattention=False, + max_ddpm_temb_period=10000, device=None, operations=comfy.ops, ): @@ -364,8 +487,12 @@ class UNetModel(nn.Module): self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample + self.use_temporal_resblocks = use_temporal_resblock self.predict_codebook_ids = n_embed is not None + self.default_num_video_frames = None + self.default_image_only_indicator = None + time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device), @@ -402,13 +529,104 @@ class UNetModel(nn.Module): input_block_chans = [model_channels] ch = model_channels ds = 1 + + def get_attention_layer( + ch, + num_heads, + dim_head, + depth=1, + context_dim=None, + use_checkpoint=False, + disable_self_attn=False, + ): + if use_temporal_attention: + return SpatialVideoTransformer( + ch, + num_heads, + dim_head, + depth=depth, + context_dim=context_dim, + time_context_dim=time_context_dim, + dropout=dropout, + ff_in=extra_ff_mix_layer, + use_spatial_context=use_spatial_context, + merge_strategy=merge_strategy, + merge_factor=merge_factor, + checkpoint=use_checkpoint, + 
use_linear=use_linear_in_transformer, + disable_self_attn=disable_self_attn, + disable_temporal_crossattention=disable_temporal_crossattention, + max_time_embed_period=max_ddpm_temb_period, + dtype=self.dtype, device=device, operations=operations + ) + else: + return SpatialTransformer( + ch, num_heads, dim_head, depth=depth, context_dim=context_dim, + disable_self_attn=disable_self_attn, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations + ) + + def get_resblock( + merge_factor, + merge_strategy, + video_kernel_size, + ch, + time_embed_dim, + dropout, + out_channels, + dims, + use_checkpoint, + use_scale_shift_norm, + down=False, + up=False, + dtype=None, + device=None, + operations=comfy.ops + ): + if self.use_temporal_resblocks: + return VideoResBlock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + channels=ch, + emb_channels=time_embed_dim, + dropout=dropout, + out_channels=out_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=down, + up=up, + dtype=dtype, + device=device, + operations=operations + ) + else: + return ResBlock( + channels=ch, + emb_channels=time_embed_dim, + dropout=dropout, + out_channels=out_channels, + use_checkpoint=use_checkpoint, + dims=dims, + use_scale_shift_norm=use_scale_shift_norm, + down=down, + up=up, + dtype=dtype, + device=device, + operations=operations + ) + for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, @@ -435,11 +653,9 @@ class UNetModel(nn.Module): disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: - layers.append(SpatialTransformer( + layers.append(get_attention_layer( ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations - ) + disable_self_attn=disabled_sa, use_checkpoint=use_checkpoint) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch @@ -448,10 +664,13 @@ class UNetModel(nn.Module): out_ch = ch self.input_blocks.append( TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, @@ -481,10 +700,14 @@ class UNetModel(nn.Module): #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels mid_block = [ - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, + out_channels=None, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, @@ -493,15 +716,18 @@ class UNetModel(nn.Module): operations=operations )] if transformer_depth_middle >= 0: - mid_block += [SpatialTransformer( # always 
uses a self-attn + mid_block += [get_attention_layer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim, - disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations + disable_self_attn=disable_middle_self_attn, use_checkpoint=use_checkpoint ), - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, + out_channels=None, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, @@ -517,10 +743,13 @@ class UNetModel(nn.Module): for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch + ich, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, @@ -548,19 +777,21 @@ class UNetModel(nn.Module): if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( - SpatialTransformer( + get_attention_layer( ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations + disable_self_attn=disabled_sa, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, @@ -602,6 +833,10 @@ class UNetModel(nn.Module): transformer_options["current_index"] = 0 transformer_patches = transformer_options.get("patches", {}) + num_video_frames = kwargs.get("num_video_frames", self.default_num_video_frames) + image_only_indicator = kwargs.get("image_only_indicator", self.default_image_only_indicator) + time_context = kwargs.get("time_context", None) + assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" @@ -616,7 +851,7 @@ class UNetModel(nn.Module): h = x.type(self.dtype) for id, module in enumerate(self.input_blocks): transformer_options["block"] = ("input", id) - h = forward_timestep_embed(module, h, emb, context, transformer_options) + h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) h = apply_control(h, control, 'input') if "input_block_patch" in transformer_patches: patch = transformer_patches["input_block_patch"] @@ -630,9 +865,10 @@ class UNetModel(nn.Module): h = p(h, transformer_options) transformer_options["block"] = ("middle", 0) - h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) + h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) h = apply_control(h, control, 'middle') + for id, 
module in enumerate(self.output_blocks): transformer_options["block"] = ("output", id) hsp = hs.pop() @@ -649,7 +885,7 @@ class UNetModel(nn.Module): output_shape = hs[-1].shape else: output_shape = None - h = forward_timestep_embed(module, h, emb, context, transformer_options, output_shape) + h = forward_timestep_embed(module, h, emb, context, transformer_options, output_shape, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index 0298ca99d..704bbe574 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -13,11 +13,78 @@ import math import torch import torch.nn as nn import numpy as np -from einops import repeat +from einops import repeat, rearrange from comfy.ldm.util import instantiate_from_config import comfy.ops +class AlphaBlender(nn.Module): + strategies = ["learned", "fixed", "learned_with_images"] + + def __init__( + self, + alpha: float, + merge_strategy: str = "learned_with_images", + rearrange_pattern: str = "b t -> (b t) 1 1", + ): + super().__init__() + self.merge_strategy = merge_strategy + self.rearrange_pattern = rearrange_pattern + + assert ( + merge_strategy in self.strategies + ), f"merge_strategy needs to be in {self.strategies}" + + if self.merge_strategy == "fixed": + self.register_buffer("mix_factor", torch.Tensor([alpha])) + elif ( + self.merge_strategy == "learned" + or self.merge_strategy == "learned_with_images" + ): + self.register_parameter( + "mix_factor", torch.nn.Parameter(torch.Tensor([alpha])) + ) + else: + raise ValueError(f"unknown merge strategy {self.merge_strategy}") + + def get_alpha(self, image_only_indicator: torch.Tensor) -> torch.Tensor: + # skip_time_mix = rearrange(repeat(skip_time_mix, 'b -> (b t) () () ()', t=t), '(b t) 1 ... -> b 1 t ...', t=t) + if self.merge_strategy == "fixed": + # make shape compatible + # alpha = repeat(self.mix_factor, '1 -> b () t () ()', t=t, b=bs) + alpha = self.mix_factor + elif self.merge_strategy == "learned": + alpha = torch.sigmoid(self.mix_factor) + # make shape compatible + # alpha = repeat(alpha, '1 -> s () ()', s = t * bs) + elif self.merge_strategy == "learned_with_images": + assert image_only_indicator is not None, "need image_only_indicator ..." + alpha = torch.where( + image_only_indicator.bool(), + torch.ones(1, 1, device=image_only_indicator.device), + rearrange(torch.sigmoid(self.mix_factor), "... -> ... 
1"), + ) + alpha = rearrange(alpha, self.rearrange_pattern) + # make shape compatible + # alpha = repeat(alpha, '1 -> s () ()', s = t * bs) + else: + raise NotImplementedError() + return alpha + + def forward( + self, + x_spatial, + x_temporal, + image_only_indicator=None, + ) -> torch.Tensor: + alpha = self.get_alpha(image_only_indicator) + x = ( + alpha.to(x_spatial.dtype) * x_spatial + + (1.0 - alpha).to(x_spatial.dtype) * x_temporal + ) + return x + + def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if schedule == "linear": betas = ( diff --git a/comfy/ldm/modules/temporal_ae.py b/comfy/ldm/modules/temporal_ae.py new file mode 100644 index 000000000..11ae049f3 --- /dev/null +++ b/comfy/ldm/modules/temporal_ae.py @@ -0,0 +1,244 @@ +import functools +from typing import Callable, Iterable, Union + +import torch +from einops import rearrange, repeat + +import comfy.ops + +from .diffusionmodules.model import ( + AttnBlock, + Decoder, + ResnetBlock, +) +from .diffusionmodules.openaimodel import ResBlock, timestep_embedding +from .attention import BasicTransformerBlock + +def partialclass(cls, *args, **kwargs): + class NewCls(cls): + __init__ = functools.partialmethod(cls.__init__, *args, **kwargs) + + return NewCls + + +class VideoResBlock(ResnetBlock): + def __init__( + self, + out_channels, + *args, + dropout=0.0, + video_kernel_size=3, + alpha=0.0, + merge_strategy="learned", + **kwargs, + ): + super().__init__(out_channels=out_channels, dropout=dropout, *args, **kwargs) + if video_kernel_size is None: + video_kernel_size = [3, 1, 1] + self.time_stack = ResBlock( + channels=out_channels, + emb_channels=0, + dropout=dropout, + dims=3, + use_scale_shift_norm=False, + use_conv=False, + up=False, + down=False, + kernel_size=video_kernel_size, + use_checkpoint=False, + skip_t_emb=True, + ) + + self.merge_strategy = merge_strategy + if self.merge_strategy == "fixed": + self.register_buffer("mix_factor", torch.Tensor([alpha])) + elif self.merge_strategy == "learned": + self.register_parameter( + "mix_factor", torch.nn.Parameter(torch.Tensor([alpha])) + ) + else: + raise ValueError(f"unknown merge strategy {self.merge_strategy}") + + def get_alpha(self, bs): + if self.merge_strategy == "fixed": + return self.mix_factor + elif self.merge_strategy == "learned": + return torch.sigmoid(self.mix_factor) + else: + raise NotImplementedError() + + def forward(self, x, temb, skip_video=False, timesteps=None): + b, c, h, w = x.shape + if timesteps is None: + timesteps = b + + x = super().forward(x, temb) + + if not skip_video: + x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps) + + x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps) + + x = self.time_stack(x, temb) + + alpha = self.get_alpha(bs=b // timesteps) + x = alpha * x + (1.0 - alpha) * x_mix + + x = rearrange(x, "b c t h w -> (b t) c h w") + return x + + +class AE3DConv(torch.nn.Conv2d): + def __init__(self, in_channels, out_channels, video_kernel_size=3, *args, **kwargs): + super().__init__(in_channels, out_channels, *args, **kwargs) + if isinstance(video_kernel_size, Iterable): + padding = [int(k // 2) for k in video_kernel_size] + else: + padding = int(video_kernel_size // 2) + + self.time_mix_conv = torch.nn.Conv3d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=video_kernel_size, + padding=padding, + ) + + def forward(self, input, timesteps=None, skip_video=False): + if timesteps is None: + timesteps = input.shape[0] + x = super().forward(input) + if 
skip_video: + return x + x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps) + x = self.time_mix_conv(x) + return rearrange(x, "b c t h w -> (b t) c h w") + + +class AttnVideoBlock(AttnBlock): + def __init__( + self, in_channels: int, alpha: float = 0, merge_strategy: str = "learned" + ): + super().__init__(in_channels) + # no context, single headed, as in base class + self.time_mix_block = BasicTransformerBlock( + dim=in_channels, + n_heads=1, + d_head=in_channels, + checkpoint=False, + ff_in=True, + ) + + time_embed_dim = self.in_channels * 4 + self.video_time_embed = torch.nn.Sequential( + comfy.ops.Linear(self.in_channels, time_embed_dim), + torch.nn.SiLU(), + comfy.ops.Linear(time_embed_dim, self.in_channels), + ) + + self.merge_strategy = merge_strategy + if self.merge_strategy == "fixed": + self.register_buffer("mix_factor", torch.Tensor([alpha])) + elif self.merge_strategy == "learned": + self.register_parameter( + "mix_factor", torch.nn.Parameter(torch.Tensor([alpha])) + ) + else: + raise ValueError(f"unknown merge strategy {self.merge_strategy}") + + def forward(self, x, timesteps=None, skip_time_block=False): + if skip_time_block: + return super().forward(x) + + if timesteps is None: + timesteps = x.shape[0] + + x_in = x + x = self.attention(x) + h, w = x.shape[2:] + x = rearrange(x, "b c h w -> b (h w) c") + + x_mix = x + num_frames = torch.arange(timesteps, device=x.device) + num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps) + num_frames = rearrange(num_frames, "b t -> (b t)") + t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False) + emb = self.video_time_embed(t_emb) # b, n_channels + emb = emb[:, None, :] + x_mix = x_mix + emb + + alpha = self.get_alpha() + x_mix = self.time_mix_block(x_mix, timesteps=timesteps) + x = alpha * x + (1.0 - alpha) * x_mix # alpha merge + + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w) + x = self.proj_out(x) + + return x_in + x + + def get_alpha( + self, + ): + if self.merge_strategy == "fixed": + return self.mix_factor + elif self.merge_strategy == "learned": + return torch.sigmoid(self.mix_factor) + else: + raise NotImplementedError(f"unknown merge strategy {self.merge_strategy}") + + + +def make_time_attn( + in_channels, + attn_type="vanilla", + attn_kwargs=None, + alpha: float = 0, + merge_strategy: str = "learned", +): + return partialclass( + AttnVideoBlock, in_channels, alpha=alpha, merge_strategy=merge_strategy + ) + + +class Conv2DWrapper(torch.nn.Conv2d): + def forward(self, input: torch.Tensor, **kwargs) -> torch.Tensor: + return super().forward(input) + + +class VideoDecoder(Decoder): + available_time_modes = ["all", "conv-only", "attn-only"] + + def __init__( + self, + *args, + video_kernel_size: Union[int, list] = 3, + alpha: float = 0.0, + merge_strategy: str = "learned", + time_mode: str = "conv-only", + **kwargs, + ): + self.video_kernel_size = video_kernel_size + self.alpha = alpha + self.merge_strategy = merge_strategy + self.time_mode = time_mode + assert ( + self.time_mode in self.available_time_modes + ), f"time_mode parameter has to be in {self.available_time_modes}" + + if self.time_mode != "attn-only": + kwargs["conv_out_op"] = partialclass(AE3DConv, video_kernel_size=self.video_kernel_size) + if self.time_mode not in ["conv-only", "only-last-conv"]: + kwargs["attn_op"] = partialclass(make_time_attn, alpha=self.alpha, merge_strategy=self.merge_strategy) + if self.time_mode not in ["attn-only", "only-last-conv"]: + kwargs["resnet_op"] = partialclass(VideoResBlock, 
video_kernel_size=self.video_kernel_size, alpha=self.alpha, merge_strategy=self.merge_strategy) + + super().__init__(*args, **kwargs) + + def get_last_layer(self, skip_time_mix=False, **kwargs): + if self.time_mode == "attn-only": + raise NotImplementedError("TODO") + else: + return ( + self.conv_out.time_mix_conv.weight + if not skip_time_mix + else self.conv_out.weight + ) diff --git a/comfy/model_base.py b/comfy/model_base.py index 772e26934..34274c4ae 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -10,17 +10,22 @@ from . import utils class ModelType(Enum): EPS = 1 V_PREDICTION = 2 + V_PREDICTION_EDM = 3 -from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete +from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete, ModelSamplingContinuousEDM + def model_sampling(model_config, model_type): + s = ModelSamplingDiscrete + if model_type == ModelType.EPS: c = EPS elif model_type == ModelType.V_PREDICTION: c = V_PREDICTION - - s = ModelSamplingDiscrete + elif model_type == ModelType.V_PREDICTION_EDM: + c = V_PREDICTION + s = ModelSamplingContinuousEDM class ModelSampling(s, c): pass @@ -262,3 +267,48 @@ class SDXL(BaseModel): out.append(self.embedder(torch.Tensor([target_width]))) flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1) return torch.cat((clip_pooled.to(flat.device), flat), dim=1) + +class SVD_img2vid(BaseModel): + def __init__(self, model_config, model_type=ModelType.V_PREDICTION_EDM, device=None): + super().__init__(model_config, model_type, device=device) + self.embedder = Timestep(256) + + def encode_adm(self, **kwargs): + fps_id = kwargs.get("fps", 6) - 1 + motion_bucket_id = kwargs.get("motion_bucket_id", 127) + augmentation = kwargs.get("augmentation_level", 0) + + out = [] + out.append(self.embedder(torch.Tensor([fps_id]))) + out.append(self.embedder(torch.Tensor([motion_bucket_id]))) + out.append(self.embedder(torch.Tensor([augmentation]))) + + flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0) + return flat + + def extra_conds(self, **kwargs): + out = {} + adm = self.encode_adm(**kwargs) + if adm is not None: + out['y'] = comfy.conds.CONDRegular(adm) + + latent_image = kwargs.get("concat_latent_image", None) + noise = kwargs.get("noise", None) + device = kwargs["device"] + + if latent_image is None: + latent_image = torch.zeros_like(noise) + + if latent_image.shape[1:] != noise.shape[1:]: + latent_image = utils.common_upscale(latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center") + + latent_image = utils.repeat_to_batch_size(latent_image, noise.shape[0]) + + out['c_concat'] = comfy.conds.CONDNoiseShape(latent_image) + + if "time_conditioning" in kwargs: + out["time_context"] = comfy.conds.CONDCrossAttn(kwargs["time_conditioning"]) + + out['image_only_indicator'] = comfy.conds.CONDConstant(torch.zeros((1,), device=device)) + out['num_video_frames'] = comfy.conds.CONDConstant(noise.shape[0]) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index d65d91e7c..45d603a0c 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -24,7 +24,8 @@ def calculate_transformer_depth(prefix, state_dict_keys, state_dict): last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}') context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1] use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2 - return last_transformer_depth, context_dim, 
use_linear_in_transformer + time_stack = '{}1.time_stack.0.attn1.to_q.weight'.format(prefix) in state_dict or '{}1.time_mix_blocks.0.attn1.to_q.weight'.format(prefix) in state_dict + return last_transformer_depth, context_dim, use_linear_in_transformer, time_stack return None def detect_unet_config(state_dict, key_prefix, dtype): @@ -57,6 +58,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): context_dim = None use_linear_in_transformer = False + video_model = False current_res = 1 count = 0 @@ -99,6 +101,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): if context_dim is None: context_dim = out[1] use_linear_in_transformer = out[2] + video_model = out[3] else: transformer_depth.append(0) @@ -127,6 +130,19 @@ def detect_unet_config(state_dict, key_prefix, dtype): unet_config["transformer_depth_middle"] = transformer_depth_middle unet_config['use_linear_in_transformer'] = use_linear_in_transformer unet_config["context_dim"] = context_dim + + if video_model: + unet_config["extra_ff_mix_layer"] = True + unet_config["use_spatial_context"] = True + unet_config["merge_strategy"] = "learned_with_images" + unet_config["merge_factor"] = 0.0 + unet_config["video_kernel_size"] = [3, 1, 1] + unet_config["use_temporal_resblock"] = True + unet_config["use_temporal_attention"] = True + else: + unet_config["use_temporal_resblock"] = False + unet_config["use_temporal_attention"] = False + return unet_config def model_config_from_unet_config(unet_config): diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index 9e2a1c1af..fac5c995e 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -1,7 +1,7 @@ import torch import numpy as np from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule - +import math class EPS: def calculate_input(self, sigma, noise): @@ -83,3 +83,47 @@ class ModelSamplingDiscrete(torch.nn.Module): percent = 1.0 - percent return self.sigma(torch.tensor(percent * 999.0)).item() + +class ModelSamplingContinuousEDM(torch.nn.Module): + def __init__(self, model_config=None): + super().__init__() + self.sigma_data = 1.0 + + if model_config is not None: + sampling_settings = model_config.sampling_settings + else: + sampling_settings = {} + + sigma_min = sampling_settings.get("sigma_min", 0.002) + sigma_max = sampling_settings.get("sigma_max", 120.0) + self.set_sigma_range(sigma_min, sigma_max) + + def set_sigma_range(self, sigma_min, sigma_max): + sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp() + + self.register_buffer('sigmas', sigmas) #for compatibility with some schedulers + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + return 0.25 * sigma.log() + + def sigma(self, timestep): + return (timestep / 0.25).exp() + + def percent_to_sigma(self, percent): + if percent <= 0.0: + return 999999999.9 + if percent >= 1.0: + return 0.0 + percent = 1.0 - percent + + log_sigma_min = math.log(self.sigma_min) + return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min) diff --git a/comfy/sd.py b/comfy/sd.py index a8df3bdd4..7f85540c4 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -159,7 +159,15 @@ class VAE: self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) if config is None: - if "taesd_decoder.1.weight" in sd: + if "decoder.mid.block_1.mix_factor" in sd: + encoder_config = 
{'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} + decoder_config = encoder_config.copy() + decoder_config["video_kernel_size"] = [3, 1, 1] + decoder_config["alpha"] = 0.0 + self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"}, + encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': encoder_config}, + decoder_config={'target': "comfy.ldm.modules.temporal_ae.VideoDecoder", 'params': decoder_config}) + elif "taesd_decoder.1.weight" in sd: self.first_stage_model = comfy.taesd.taesd.TAESD() else: #default SD1.x/SD2.x VAE parameters diff --git a/comfy/supported_models.py b/comfy/supported_models.py index fdd4ea4f5..7e2ac677d 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -17,6 +17,7 @@ class SD15(supported_models_base.BASE): "model_channels": 320, "use_linear_in_transformer": False, "adm_in_channels": None, + "use_temporal_attention": False, } unet_extra_config = { @@ -56,6 +57,7 @@ class SD20(supported_models_base.BASE): "model_channels": 320, "use_linear_in_transformer": True, "adm_in_channels": None, + "use_temporal_attention": False, } latent_format = latent_formats.SD15 @@ -88,6 +90,7 @@ class SD21UnclipL(SD20): "model_channels": 320, "use_linear_in_transformer": True, "adm_in_channels": 1536, + "use_temporal_attention": False, } clip_vision_prefix = "embedder.model.visual." @@ -100,6 +103,7 @@ class SD21UnclipH(SD20): "model_channels": 320, "use_linear_in_transformer": True, "adm_in_channels": 2048, + "use_temporal_attention": False, } clip_vision_prefix = "embedder.model.visual." @@ -112,6 +116,7 @@ class SDXLRefiner(supported_models_base.BASE): "context_dim": 1280, "adm_in_channels": 2560, "transformer_depth": [0, 0, 4, 4, 4, 4, 0, 0], + "use_temporal_attention": False, } latent_format = latent_formats.SDXL @@ -148,7 +153,8 @@ class SDXL(supported_models_base.BASE): "use_linear_in_transformer": True, "transformer_depth": [0, 0, 2, 2, 10, 10], "context_dim": 2048, - "adm_in_channels": 2816 + "adm_in_channels": 2816, + "use_temporal_attention": False, } latent_format = latent_formats.SDXL @@ -203,8 +209,34 @@ class SSD1B(SDXL): "use_linear_in_transformer": True, "transformer_depth": [0, 0, 2, 2, 4, 4], "context_dim": 2048, - "adm_in_channels": 2816 + "adm_in_channels": 2816, + "use_temporal_attention": False, } +class SVD_img2vid(supported_models_base.BASE): + unet_config = { + "model_channels": 320, + "in_channels": 8, + "use_linear_in_transformer": True, + "transformer_depth": [1, 1, 1, 1, 1, 1, 0, 0], + "context_dim": 1024, + "adm_in_channels": 768, + "use_temporal_attention": True, + "use_temporal_resblock": True + } + + clip_vision_prefix = "conditioner.embedders.0.open_clip.model.visual." 
+
+    latent_format = latent_formats.SD15
+
+    sampling_settings = {"sigma_max": 700.0, "sigma_min": 0.002}
+
+    def get_model(self, state_dict, prefix="", device=None):
+        out = model_base.SVD_img2vid(self, device=device)
+        return out
+
+    def clip_target(self):
+        return None
 
 models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B]
+models += [SVD_img2vid]
diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py
index 0f4ddd9c3..6991c9837 100644
--- a/comfy_extras/nodes_model_advanced.py
+++ b/comfy_extras/nodes_model_advanced.py
@@ -128,6 +128,36 @@ class ModelSamplingDiscrete:
         m.add_object_patch("model_sampling", model_sampling)
         return (m, )
 
+class ModelSamplingContinuousEDM:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model": ("MODEL",),
+                              "sampling": (["v_prediction", "eps"],),
+                              "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
+                              "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
+                              }}
+
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+
+    CATEGORY = "advanced/model"
+
+    def patch(self, model, sampling, sigma_max, sigma_min):
+        m = model.clone()
+
+        if sampling == "eps":
+            sampling_type = comfy.model_sampling.EPS
+        elif sampling == "v_prediction":
+            sampling_type = comfy.model_sampling.V_PREDICTION
+
+        class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousEDM, sampling_type):
+            pass
+
+        model_sampling = ModelSamplingAdvanced()
+        model_sampling.set_sigma_range(sigma_min, sigma_max)
+        m.add_object_patch("model_sampling", model_sampling)
+        return (m, )
+
 class RescaleCFG:
     @classmethod
     def INPUT_TYPES(s):
@@ -169,5 +199,6 @@
 
 NODE_CLASS_MAPPINGS = {
     "ModelSamplingDiscrete": ModelSamplingDiscrete,
+    "ModelSamplingContinuousEDM": ModelSamplingContinuousEDM,
     "RescaleCFG": RescaleCFG,
 }

From 42dfae63312f443d13841a0c4a5de467f5c354c9 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 23 Nov 2023 19:43:09 -0500
Subject: [PATCH 130/170] Nodes to properly use the SVD img2vid checkpoint.

The img2vid model is conditioned on clip vision output only, which means
there is no CLIP text model; that is why I added an ImageOnlyCheckpointLoader
to load it. Note that the unClipCheckpointLoader can also load it because it
also has a CLIP_VISION output.

SDV_img2vid_Conditioning is the node used to pass the right conditioning
to the img2vid model.

VideoLinearCFGGuidance applies a linearly decreasing CFG scale to each
video frame, from the cfg set in the sampler node down to min_cfg.
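For reference, the guidance math is a per-frame interpolation of the usual
CFG formula: the video frames are stacked along the batch dimension, so a
1-D linspace over the batch gives each frame its own scale. A minimal
sketch of what the linear_cfg function below computes (the tensor shapes
here are placeholders, not the model's real latent size):

```python
# Sketch of VideoLinearCFGGuidance's math: frame 0 is guided with
# min_cfg, the last frame with the sampler's cfg, linearly in between.
import torch

def linear_cfg_sketch(cond, uncond, cond_scale, min_cfg):
    scale = torch.linspace(min_cfg, cond_scale, cond.shape[0],
                           device=cond.device).reshape((cond.shape[0], 1, 1, 1))
    return uncond + scale * (cond - uncond)

frames = 14  # the node's default video_frames
cond = torch.randn(frames, 4, 72, 128)    # placeholder latents
uncond = torch.randn(frames, 4, 72, 128)
out = linear_cfg_sketch(cond, uncond, cond_scale=3.0, min_cfg=1.0)
```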
SDV_img2vid_Conditioning can be found in conditioning->video_models ImageOnlyCheckpointLoader can be found in loaders->video_models VideoLinearCFGGuidance can be found in sampling->video_models --- comfy_extras/nodes_video_model.py | 89 +++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 90 insertions(+) create mode 100644 comfy_extras/nodes_video_model.py diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py new file mode 100644 index 000000000..92bd883ae --- /dev/null +++ b/comfy_extras/nodes_video_model.py @@ -0,0 +1,89 @@ +import nodes +import torch +import comfy.utils +import comfy.sd +import folder_paths + + +class ImageOnlyCheckpointLoader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + }} + RETURN_TYPES = ("MODEL", "CLIP_VISION", "VAE") + FUNCTION = "load_checkpoint" + + CATEGORY = "loaders/video_models" + + def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=False, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return (out[0], out[3], out[2]) + + +class SDV_img2vid_Conditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}), + "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}), + "fps": ("INT", {"default": 6, "min": 1, "max": 1024}), + "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}) + }} + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "encode" + + CATEGORY = "conditioning/video_models" + + def encode(self, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id, fps, augmentation_level): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + if augmentation_level > 0: + encode_pixels += torch.randn_like(pixels) * augmentation_level + t = vae.encode(encode_pixels) + positive = [[pooled, {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t}]] + negative = [[torch.zeros_like(pooled), {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)}]] + latent = torch.zeros([video_frames, 4, height // 8, width // 8]) + return (positive, negative, {"samples":latent}) + +class VideoLinearCFGGuidance: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "min_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "sampling/video_models" + + def patch(self, model, min_cfg): + def linear_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + + scale = torch.linspace(min_cfg, 
cond_scale, cond.shape[0], device=cond.device).reshape((cond.shape[0], 1, 1, 1)) + return uncond + scale * (cond - uncond) + + m = model.clone() + m.set_model_sampler_cfg_function(linear_cfg) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader, + "SDV_img2vid_Conditioning": SDV_img2vid_Conditioning, + "VideoLinearCFGGuidance": VideoLinearCFGGuidance, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ImageOnlyCheckpointLoader": "Image Only Checkpoint Loader (img2vid model)", +} diff --git a/nodes.py b/nodes.py index 2de468da7..bb24bc6e8 100644 --- a/nodes.py +++ b/nodes.py @@ -1850,6 +1850,7 @@ def init_custom_nodes(): "nodes_model_advanced.py", "nodes_model_downscale.py", "nodes_images.py", + "nodes_video_model.py", ] for node_file in extras_files: From 02ffbb2de3e33d9d64d38c13e70e860d9af90101 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 23:20:07 -0500 Subject: [PATCH 131/170] Fix typo. --- comfy_extras/nodes_video_model.py | 4 ++-- web/scripts/app.js | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py index 92bd883ae..26a717a38 100644 --- a/comfy_extras/nodes_video_model.py +++ b/comfy_extras/nodes_video_model.py @@ -21,7 +21,7 @@ class ImageOnlyCheckpointLoader: return (out[0], out[3], out[2]) -class SDV_img2vid_Conditioning: +class SVD_img2vid_Conditioning: @classmethod def INPUT_TYPES(s): return {"required": { "clip_vision": ("CLIP_VISION",), @@ -80,7 +80,7 @@ class VideoLinearCFGGuidance: NODE_CLASS_MAPPINGS = { "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader, - "SDV_img2vid_Conditioning": SDV_img2vid_Conditioning, + "SVD_img2vid_Conditioning": SVD_img2vid_Conditioning, "VideoLinearCFGGuidance": VideoLinearCFGGuidance, } diff --git a/web/scripts/app.js b/web/scripts/app.js index 180416ef9..cd20c40fd 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1523,6 +1523,7 @@ export class ComfyApp { // Patch T2IAdapterLoader to ControlNetLoader since they are the same node now if (n.type == "T2IAdapterLoader") n.type = "ControlNetLoader"; if (n.type == "ConditioningAverage ") n.type = "ConditioningAverage"; //typo fix + if (n.type == "SDV_img2vid_Conditioning") n.type = "SVD_img2vid_Conditioning"; //typo fix // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { From c782cf3ea95021b0d9fa95014b13e7c32f20fd6e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 00:27:08 -0500 Subject: [PATCH 132/170] Add to Readme that Stable Video Diffusion is supported. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f87c0404f..9d7e31790 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin ## Features - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything. -- Fully supports SD1.x, SD2.x and SDXL +- Fully supports SD1.x, SD2.x, [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/) and [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - Asynchronous Queue system - Many optimizations: Only re-executes the parts of the workflow that changes between executions. 
- Command line option: ```--lowvram``` to make it work on GPUs with less than 3GB vram (enabled automatically on GPUs with low vram)

From 982338b9bb41301000ddac46d67103af9d0582cd Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 24 Nov 2023 02:08:08 -0500
Subject: [PATCH 133/170] Fix issue loading webp files in UI.

---
 web/scripts/ui.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web/scripts/ui.js b/web/scripts/ui.js
index 6f01aa5b2..8a58d30b3 100644
--- a/web/scripts/ui.js
+++ b/web/scripts/ui.js
@@ -599,7 +599,7 @@ export class ComfyUI {
 		const fileInput = $el("input", {
 			id: "comfy-file-input",
 			type: "file",
-			accept: ".json,image/png,.latent,.safetensors",
+			accept: ".json,image/png,.latent,.safetensors,image/webp",
 			style: {display: "none"},
 			parent: document.body,
 			onchange: () => {

From 3e5ea74ad356e849ea27f1d766a7b6d90a5acfda Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 24 Nov 2023 03:55:35 -0500
Subject: [PATCH 134/170] Make buggy xformers fall back on pytorch attention.

---
 comfy/ldm/modules/attention.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 947e2008c..d511dda16 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -278,9 +278,20 @@ def attention_split(q, k, v, heads, mask=None):
         )
     return r1
 
+BROKEN_XFORMERS = False
+try:
+    x_vers = xformers.__version__
+    #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
+    BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
+except:
+    pass
+
 def attention_xformers(q, k, v, heads, mask=None):
     b, _, dim_head = q.shape
     dim_head //= heads
+    if BROKEN_XFORMERS:
+        if b * heads > 65535:
+            return attention_pytorch(q, k, v, heads, mask)
 
     q, k, v = map(
         lambda t: t.unsqueeze(3)

From eff24ea6aa4f53870f575ec34371b7db940c1cfc Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 24 Nov 2023 11:12:10 -0500
Subject: [PATCH 135/170] Add a node to save animated PNG files.

These work in ffmpeg unlike webp.
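The node leans on Pillow's APNG writer; a minimal sketch of the export
call it performs, with placeholder frames and filename (the node itself
also embeds workflow metadata and handles the output directory):

```python
# Sketch: save a list of PIL frames as an animated PNG the way the node
# does: first frame via save(save_all=True), the rest via append_images,
# with duration given in milliseconds per frame.
from PIL import Image

frames = [Image.new("RGB", (64, 64), (16 * i, 0, 0)) for i in range(8)]
fps = 6.0
frames[0].save("animation.png", save_all=True,
               append_images=frames[1:],
               duration=int(1000.0 / fps),
               compress_level=4)
```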
--- comfy_extras/nodes_images.py | 56 ++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 8c6ae5387..450c8dc40 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -3,6 +3,8 @@ import folder_paths from comfy.cli_args import args from PIL import Image +from PIL.PngImagePlugin import PngInfo + import numpy as np import json import os @@ -112,8 +114,62 @@ class SaveAnimatedWEBP: animated = num_frames != 1 return { "ui": { "images": results, "animated": (animated,) } } +class SaveAnimatedPNG: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"}), + "fps": ("FLOAT", {"default": 12.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + "compress_level": ("INT", {"default": 4, "min": 0, "max": 9}) + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images" + + OUTPUT_NODE = True + + CATEGORY = "_for_testing" + + def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + pil_images = [] + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + file = f"{filename}_{counter:05}_.png" + pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:]) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + return { "ui": { "images": results, "animated": (True,)} } + NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, "SaveAnimatedWEBP": SaveAnimatedWEBP, + "SaveAnimatedPNG": SaveAnimatedPNG, } From 916e9c998c5952a30e7795ccfda74186a82a2a06 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 11:19:23 -0500 Subject: [PATCH 136/170] Use same default fps as webp node. 
--- comfy_extras/nodes_images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 450c8dc40..4c86b2df6 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -125,7 +125,7 @@ class SaveAnimatedPNG: return {"required": {"images": ("IMAGE", ), "filename_prefix": ("STRING", {"default": "ComfyUI"}), - "fps": ("FLOAT", {"default": 12.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}), "compress_level": ("INT", {"default": 4, "min": 0, "max": 9}) }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, From 8ad5d494d52883e02f5745603dfd06f1a49c040b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 18:14:17 -0500 Subject: [PATCH 137/170] Fix APNG not working in ffmpeg. --- comfy_extras/nodes_images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 4c86b2df6..4b6cd3d1b 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -152,10 +152,10 @@ class SaveAnimatedPNG: if not args.disable_metadata: metadata = PngInfo() if prompt is not None: - metadata.add_text("prompt", json.dumps(prompt)) + metadata.add(b"tEXt", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True) if extra_pnginfo is not None: for x in extra_pnginfo: - metadata.add_text(x, json.dumps(extra_pnginfo[x])) + metadata.add(b"tEXt", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True) file = f"{filename}_{counter:05}_.png" pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:]) From e020ab61f97fd8bccc31e7eebd23acd5dd9e2ecd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 18:24:19 -0500 Subject: [PATCH 138/170] Fix output APNG not working with ffmpeg. 
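Both APNG fixes come down to metadata placement: the workflow metadata is
moved out of the path of ffmpeg's parser, first by writing the chunks
after IDAT and, in the change below, by switching from tEXt to a private
ancillary "comf" chunk (a lowercase first letter marks a PNG chunk as one
decoders may safely skip). A stdlib sketch for inspecting the resulting
chunk layout, with a placeholder filename:

```python
# Sketch: walk a PNG's chunk stream (8-byte signature, then
# length/type/data/CRC records) and print the chunk types, to check
# that the "comf" metadata chunks land after the image data.
import struct

with open("animation.png", "rb") as f:
    assert f.read(8) == b"\x89PNG\r\n\x1a\n"  # PNG signature
    while True:
        header = f.read(8)
        if len(header) < 8:
            break
        length, ctype = struct.unpack(">I4s", header)
        print(ctype.decode("latin-1"), length)
        f.seek(length + 4, 1)  # skip chunk data and CRC
        if ctype == b"IEND":
            break
```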
--- comfy_extras/nodes_images.py | 4 ++-- web/scripts/pnginfo.js | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 4b6cd3d1b..5ad2235a5 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -152,10 +152,10 @@ class SaveAnimatedPNG: if not args.disable_metadata: metadata = PngInfo() if prompt is not None: - metadata.add(b"tEXt", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True) + metadata.add(b"comf", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True) if extra_pnginfo is not None: for x in extra_pnginfo: - metadata.add(b"tEXt", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True) + metadata.add(b"comf", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True) file = f"{filename}_{counter:05}_.png" pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:]) diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index f8cbe7a3c..83a4ebc86 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -24,7 +24,7 @@ export function getPngMetadata(file) { const length = dataView.getUint32(offset); // Get the chunk type const type = String.fromCharCode(...pngData.slice(offset + 4, offset + 8)); - if (type === "tEXt") { + if (type === "tEXt" || type == "comf") { // Get the keyword let keyword_end = offset + 8; while (pngData[keyword_end] !== 0) { From 5d6dfce5481f67bcfb30b1b39ad6eb78022653af Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 20:35:29 -0500 Subject: [PATCH 139/170] Fix importing diffusers unets. 
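For context on why the template dicts below gain the temporal flags: the
config detected from a diffusers checkpoint is compared against each
supported model's unet_config key by key (note the
"use_temporal_attention": False entries added to supported_models.py
earlier in this series), so a template missing those keys can no longer
line up with any model. Roughly, as an illustrative sketch only (this is
not the actual matching code):

```python
# Hypothetical shape of a dict-template match: a template matches when
# every key it declares agrees with the detected config.
def config_matches(template: dict, detected: dict) -> bool:
    return all(detected.get(k) == v for k, v in template.items())
```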
--- comfy/model_detection.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 45d603a0c..c682c3e1a 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -232,52 +232,62 @@ def unet_config_from_diffusers_unet(state_dict, dtype): SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, - 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2560, 'dtype': dtype, 'in_channels': 4, 'model_channels': 384, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [0, 0, 4, 4, 4, 4, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 4, - 'use_linear_in_transformer': True, 'context_dim': 1280, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0]} + 'use_linear_in_transformer': True, 'context_dim': 1280, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, - 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2048, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, - 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 1536, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 
'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, - 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, 'num_heads': 8, - 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_mid_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 1, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 1, - 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 1, 1, 1]} + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 1, 1, 1], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_small_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 0, 0], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 0, - 'use_linear_in_transformer': True, 'num_head_channels': 64, 'context_dim': 1, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 0, 0, 0]} + 'use_linear_in_transformer': True, 'num_head_channels': 64, 'context_dim': 1, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_diffusers_inpaint = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, - 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SSD_1B = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 4, 4], 'transformer_depth_output': [0, 0, 0, 1, 1, 2, 
10, 4, 4], - 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64} + 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, + 'use_temporal_attention': False, 'use_temporal_resblock': False} supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B] From 5b37270d3ad2227a30e15101a8d528ca77bd589d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 25 Nov 2023 02:26:50 -0500 Subject: [PATCH 140/170] Add a lora loader node for models with no CLIP. --- nodes.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/nodes.py b/nodes.py index bb24bc6e8..df40f8094 100644 --- a/nodes.py +++ b/nodes.py @@ -572,6 +572,19 @@ class LoraLoader: model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip) return (model_lora, clip_lora) +class LoraLoaderModelOnly(LoraLoader): + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "lora_name": (folder_paths.get_filename_list("loras"), ), + "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "load_lora_model_only" + + def load_lora_model_only(self, model, lora_name, strength_model): + return (self.load_lora(model, None, lora_name, strength_model, 0)[0],) + class VAELoader: @staticmethod def vae_list(): @@ -1703,6 +1716,7 @@ NODE_CLASS_MAPPINGS = { "ConditioningZeroOut": ConditioningZeroOut, "ConditioningSetTimestepRange": ConditioningSetTimestepRange, + "LoraLoaderModelOnly": LoraLoaderModelOnly, } NODE_DISPLAY_NAME_MAPPINGS = { From 50dc39d6ec5420f35b81f965c106b6710ff48e6e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 26 Nov 2023 03:13:56 -0500 Subject: [PATCH 141/170] Clean up the extra_options dict for the transformer patches. Now everything in transformer_options gets put in extra_options. 
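Instead of copying a hard-coded whitelist of keys ("current_index", "block", "original_shape", ...), BasicTransformerBlock now forwards every entry of transformer_options into extra_options, keeping only "patches" and "patches_replace" separate. The practical effect is that a patch callback can read any option, including custom ones, without attention.py having to know about it. A hypothetical attn1-style patch, with the signature assumed from the surrounding patch machinery:

    def my_attn1_patch(n, context_attn1, value_attn1, extra_options):
        # Any key set in transformer_options ("block", "block_index",
        # "original_shape", "cond_or_uncond", custom keys, ...) is now
        # visible here, alongside the n_heads/dim_head values that
        # attention.py always adds.
        block = extra_options.get("block")
        heads = extra_options["n_heads"]
        return n, context_attn1, value_attn1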
--- comfy/ldm/modules/attention.py | 31 ++++++------------- .../modules/diffusionmodules/openaimodel.py | 11 ++++--- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index d511dda16..7dc1a1b5c 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -430,31 +430,20 @@ class BasicTransformerBlock(nn.Module): extra_options = {} block = None block_index = 0 - if "current_index" in transformer_options: - extra_options["transformer_index"] = transformer_options["current_index"] - if "block_index" in transformer_options: - block_index = transformer_options["block_index"] - extra_options["block_index"] = block_index - if "original_shape" in transformer_options: - extra_options["original_shape"] = transformer_options["original_shape"] - if "block" in transformer_options: - block = transformer_options["block"] - extra_options["block"] = block - if "cond_or_uncond" in transformer_options: - extra_options["cond_or_uncond"] = transformer_options["cond_or_uncond"] - if "patches" in transformer_options: - transformer_patches = transformer_options["patches"] - else: - transformer_patches = {} + transformer_patches = {} + transformer_patches_replace = {} + + for k in transformer_options: + if k == "patches": + transformer_patches = transformer_options[k] + elif k == "patches_replace": + transformer_patches_replace = transformer_options[k] + else: + extra_options[k] = transformer_options[k] extra_options["n_heads"] = self.n_heads extra_options["dim_head"] = self.d_head - if "patches_replace" in transformer_options: - transformer_patches_replace = transformer_options["patches_replace"] - else: - transformer_patches_replace = {} - if self.ff_in: x_skip = x x = self.ff_in(self.norm_in(x)) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index a497ed344..48264892c 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -31,7 +31,7 @@ class TimestepBlock(nn.Module): Apply the module to `x` given `emb` timestep embeddings. """ -#This is needed because accelerate makes a copy of transformer_options which breaks "current_index" +#This is needed because accelerate makes a copy of transformer_options which breaks "transformer_index" def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None, time_context=None, num_video_frames=None, image_only_indicator=None): for layer in ts: if isinstance(layer, VideoResBlock): @@ -40,11 +40,12 @@ def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, out x = layer(x, emb) elif isinstance(layer, SpatialVideoTransformer): x = layer(x, context, time_context, num_video_frames, image_only_indicator, transformer_options) - transformer_options["current_index"] += 1 + if "transformer_index" in transformer_options: + transformer_options["transformer_index"] += 1 elif isinstance(layer, SpatialTransformer): x = layer(x, context, transformer_options) - if "current_index" in transformer_options: - transformer_options["current_index"] += 1 + if "transformer_index" in transformer_options: + transformer_options["transformer_index"] += 1 elif isinstance(layer, Upsample): x = layer(x, output_shape=output_shape) else: @@ -830,7 +831,7 @@ class UNetModel(nn.Module): :return: an [N x C x ...] Tensor of outputs. 
""" transformer_options["original_shape"] = list(x.shape) - transformer_options["current_index"] = 0 + transformer_options["transformer_index"] = 0 transformer_patches = transformer_options.get("patches", {}) num_video_frames = kwargs.get("num_video_frames", self.default_num_video_frames) From 39e75862b248a20e8233ccee743ba5b2e977cdcf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 26 Nov 2023 03:43:02 -0500 Subject: [PATCH 142/170] Fix regression from last commit. --- comfy/ldm/modules/attention.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 7dc1a1b5c..f68452382 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -428,8 +428,8 @@ class BasicTransformerBlock(nn.Module): def _forward(self, x, context=None, transformer_options={}): extra_options = {} - block = None - block_index = 0 + block = transformer_options.get("block", None) + block_index = transformer_options.get("block_index", 0) transformer_patches = {} transformer_patches_replace = {} From 6aa1bcd601dfdcb4485ea31947ffbf992a5b54fc Mon Sep 17 00:00:00 2001 From: Jack Bauer <2308123+dmx974@users.noreply.github.com> Date: Sun, 26 Nov 2023 17:23:11 +0400 Subject: [PATCH 143/170] Remove hard coded max_items in history API --- web/scripts/api.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/scripts/api.js b/web/scripts/api.js index de56b2310..9aa7528af 100644 --- a/web/scripts/api.js +++ b/web/scripts/api.js @@ -254,9 +254,9 @@ class ComfyApi extends EventTarget { * Gets the prompt execution history * @returns Prompt history including node outputs */ - async getHistory() { + async getHistory(max_items=200) { try { - const res = await this.fetchApi("/history?max_items=200"); + const res = await this.fetchApi(`/history?max_items=${max_items}`); return { History: Object.values(await res.json()) }; } catch (error) { console.error(error); From edd6f75d3ad243e6c2d38f2d94191da40d12b2f3 Mon Sep 17 00:00:00 2001 From: David Jeske Date: Sun, 26 Nov 2023 13:10:31 -0700 Subject: [PATCH 144/170] better error for invalid output paths --- folder_paths.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/folder_paths.py b/folder_paths.py index 4a38deec0..5479fd7b2 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -228,8 +228,12 @@ def get_save_image_path(filename_prefix, output_dir, image_width=0, image_height full_output_folder = os.path.join(output_dir, subfolder) if os.path.commonpath((output_dir, os.path.abspath(full_output_folder))) != output_dir: - print("Saving image outside the output folder is not allowed.") - return {} + err = "**** ERROR: Saving image outside the output folder is not allowed." 
+ \ + "\n full_output_folder: " + os.path.abspath(full_output_folder) + \ + "\n output_dir: " + output_dir + \ + "\n commonpath: " + os.path.commonpath((output_dir, os.path.abspath(full_output_folder))) + print(err) + raise Exception(err) try: counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1 From 34eccd863bb41f48346de178a55be308dc36e5e5 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 27 Nov 2023 14:00:15 +0000 Subject: [PATCH 145/170] Add simple undo redo history --- web/extensions/core/undoRedo.js | 150 ++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 web/extensions/core/undoRedo.js diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js new file mode 100644 index 000000000..1c1d785a8 --- /dev/null +++ b/web/extensions/core/undoRedo.js @@ -0,0 +1,150 @@ +import { app } from "../../scripts/app.js"; + +const MAX_HISTORY = 50; + +let undo = []; +let redo = []; +let activeState = null; +let isOurLoad = false; +function checkState() { + const currentState = app.graph.serialize(); + if (!graphEqual(activeState, currentState)) { + undo.push(activeState); + if(undo.length > MAX_HISTORY) { + undo.shift(); + } + activeState = clone(currentState); + redo.length = 0; + } +} + +const loadGraphData = app.loadGraphData; +app.loadGraphData = async function () { + const v = await loadGraphData.apply(this, arguments); + if (isOurLoad) { + isOurLoad = false; + } else { + checkState(); + } + return v; +}; + +function clone(obj) { + try { + if (typeof structuredClone !== "undefined") { + return structuredClone(obj); + } + } catch (error) { + // structuredClone is stricter than using JSON.parse/stringify so fallback to that + } + + return JSON.parse(JSON.stringify(obj)); +} + +function graphEqual(a, b, root = true) { + if (a === b) return true; + + if (typeof a == "object" && a && typeof b == "object" && b) { + const keys = Object.getOwnPropertyNames(a); + + if (keys.length != Object.getOwnPropertyNames(b).length) { + return false; + } + + for (const key of keys) { + let av = a[key]; + let bv = b[key]; + if (root && key === "nodes") { + // Nodes need to be sorted as the order changes when selecting nodes + av = [...av].sort((a, b) => a.id - b.id); + bv = [...bv].sort((a, b) => a.id - b.id); + } + if (!graphEqual(av, bv, false)) { + return false; + } + } + + return true; + } + + return false; +} + +const undoRedo = async (e) => { + if (e.ctrlKey || e.metaKey) { + if (e.key === "y") { + const prevState = redo.pop(); + if (prevState) { + undo.push(activeState); + isOurLoad = true; + await app.loadGraphData(prevState); + activeState = prevState; + } + return true; + } else if (e.key === "z") { + const prevState = undo.pop(); + if (prevState) { + redo.push(activeState); + isOurLoad = true; + await app.loadGraphData(prevState); + activeState = prevState; + } + return true; + } + } +}; + +const bindInput = (activeEl) => { + if (activeEl?.tagName !== "CANVAS" && activeEl?.tagName !== "BODY") { + for (const evt of ["change", "input", "blur"]) { + if (`on${evt}` in activeEl) { + const listener = () => { + checkState(); + activeEl.removeEventListener(evt, listener); + }; + activeEl.addEventListener(evt, listener); + return true; + } + } + } +}; + +window.addEventListener( + "keydown", + (e) => { + requestAnimationFrame(async () => { + const activeEl = document.activeElement; + if (activeEl?.tagName === "INPUT" || 
activeEl?.type === "textarea") { + // Ignore events on inputs, they have their native history + return; + } + + // Check if this is a ctrl+z ctrl+y + if (await undoRedo(e)) return; + + // If our active element is some type of input then handle changes after they're done + if (bindInput(activeEl)) return; + checkState(); + }); + }, + true +); + +// Handle clicking DOM elements (e.g. widgets) +window.addEventListener("mouseup", () => { + checkState(); +}); + +// Handle litegraph clicks +const processMouseUp = LGraphCanvas.prototype.processMouseUp; +LGraphCanvas.prototype.processMouseUp = function (e) { + const v = processMouseUp.apply(this, arguments); + checkState(); + return v; +}; +const processMouseDown = LGraphCanvas.prototype.processMouseDown; +LGraphCanvas.prototype.processMouseDown = function (e) { + const v = processMouseDown.apply(this, arguments); + checkState(); + return v; +}; From 9be0b30cf1f69384e72823f5112072b15f1f431d Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 27 Nov 2023 14:02:50 +0000 Subject: [PATCH 146/170] fix formatting --- web/extensions/core/undoRedo.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js index 1c1d785a8..c6613b0f0 100644 --- a/web/extensions/core/undoRedo.js +++ b/web/extensions/core/undoRedo.js @@ -10,9 +10,9 @@ function checkState() { const currentState = app.graph.serialize(); if (!graphEqual(activeState, currentState)) { undo.push(activeState); - if(undo.length > MAX_HISTORY) { - undo.shift(); - } + if (undo.length > MAX_HISTORY) { + undo.shift(); + } activeState = clone(currentState); redo.length = 0; } From be71bb5e13d716c541a5372a518e9d512073fe18 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 14:04:16 -0500 Subject: [PATCH 147/170] Tweak memory inference calculations a bit. --- comfy/model_base.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 34274c4ae..3d6879ae6 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -164,12 +164,13 @@ class BaseModel(torch.nn.Module): self.inpaint_model = True def memory_required(self, input_shape): - area = input_shape[0] * input_shape[2] * input_shape[3] if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): #TODO: this needs to be tweaked - return (area / (comfy.model_management.dtype_size(self.get_dtype()) * 10)) * (1024 * 1024) + area = max(input_shape[0], 3) * input_shape[2] * input_shape[3] + return (area * comfy.model_management.dtype_size(self.get_dtype()) / 60) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. + area = input_shape[0] * input_shape[2] * input_shape[3] return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) From 13fdee6abf7a7b072ad0f1ebbaa76aca13ddd2a8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 14:55:40 -0500 Subject: [PATCH 148/170] Try to free memory for both cond+uncond before inference. 
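The previous commit scaled the flash-attention memory estimate by max(batch, 3); this one goes back to the raw area and instead doubles the batch dimension at the call site, since classifier-free guidance runs cond and uncond through the model together. A sketch of the sizing logic (shapes are illustrative):

    # prepare_sampling() now asks for memory as if the batch were
    # doubled, so enough is freed before cond+uncond inference:
    noise_shape = (1, 4, 64, 64)  # (batch, channels, height, width)
    sizing_shape = [noise_shape[0] * 2] + list(noise_shape[1:])
    # -> [2, 4, 64, 64], passed to model.memory_required() ahead of
    #    comfy.model_management.load_models_gpu()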
--- comfy/model_base.py | 4 ++-- comfy/sample.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 3d6879ae6..786c9cf47 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -166,8 +166,8 @@ class BaseModel(torch.nn.Module): def memory_required(self, input_shape): if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): #TODO: this needs to be tweaked - area = max(input_shape[0], 3) * input_shape[2] * input_shape[3] - return (area * comfy.model_management.dtype_size(self.get_dtype()) / 60) * (1024 * 1024) + area = input_shape[0] * input_shape[2] * input_shape[3] + return (area * comfy.model_management.dtype_size(self.get_dtype()) / 50) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. area = input_shape[0] * input_shape[2] * input_shape[3] diff --git a/comfy/sample.py b/comfy/sample.py index 4bfdb8ce5..034db97ee 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -83,7 +83,7 @@ def prepare_sampling(model, noise_shape, positive, negative, noise_mask): real_model = None models, inference_memory = get_additional_models(positive, negative, model.model_dtype()) - comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise_shape) + inference_memory) + comfy.model_management.load_models_gpu([model] + models, model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:])) + inference_memory) real_model = model.model return real_model, positive, negative, noise_mask, models From 488de0b4df524589c11a9bd0e2b3663d03003342 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 16:32:03 -0500 Subject: [PATCH 149/170] ModelSamplingDiscreteLCM -> ModelSamplingDiscreteDistilled --- comfy_extras/nodes_model_advanced.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 6991c9837..20261aade 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -17,7 +17,9 @@ class LCM(comfy.model_sampling.EPS): return c_out * x0 + c_skip * model_input -class ModelSamplingDiscreteLCM(torch.nn.Module): +class ModelSamplingDiscreteDistilled(torch.nn.Module): + original_timesteps = 50 + def __init__(self): super().__init__() self.sigma_data = 1.0 @@ -29,13 +31,12 @@ class ModelSamplingDiscreteLCM(torch.nn.Module): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) - original_timesteps = 50 - self.skip_steps = timesteps // original_timesteps + self.skip_steps = timesteps // self.original_timesteps - alphas_cumprod_valid = torch.zeros((original_timesteps), dtype=torch.float32) - for x in range(original_timesteps): - alphas_cumprod_valid[original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps] + alphas_cumprod_valid = torch.zeros((self.original_timesteps), dtype=torch.float32) + for x in range(self.original_timesteps): + alphas_cumprod_valid[self.original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps] sigmas = ((1 - alphas_cumprod_valid) / alphas_cumprod_valid) ** 0.5 self.set_sigmas(sigmas) @@ -116,7 +117,7 @@ class ModelSamplingDiscrete: sampling_type = comfy.model_sampling.V_PREDICTION elif sampling == "lcm": sampling_type = LCM - sampling_base = ModelSamplingDiscreteLCM + sampling_base = ModelSamplingDiscreteDistilled class 
ModelSamplingAdvanced(sampling_base, sampling_type): pass From f30b992b18078415f7c31c6c2f5ad1513db0bf5e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 16:41:33 -0500 Subject: [PATCH 150/170] .sigma and .timestep now return tensors on the same device as the input. --- comfy/model_sampling.py | 6 +++--- comfy_extras/nodes_model_advanced.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index fac5c995e..69c8b1f01 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -65,15 +65,15 @@ class ModelSamplingDiscrete(torch.nn.Module): def timestep(self, sigma): log_sigma = sigma.log() dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) + return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device) def sigma(self, timestep): - t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) + t = torch.clamp(timestep.float().to(self.log_sigmas.device), min=0, max=(len(self.sigmas) - 1)) low_idx = t.floor().long() high_idx = t.ceil().long() w = t.frac() log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() + return log_sigma.exp().to(timestep.device) def percent_to_sigma(self, percent): if percent <= 0.0: diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 20261aade..efcdf1932 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -56,15 +56,15 @@ class ModelSamplingDiscreteDistilled(torch.nn.Module): def timestep(self, sigma): log_sigma = sigma.log() dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1) + return (dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1)).to(sigma.device) def sigma(self, timestep): - t = torch.clamp(((timestep - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1)) + t = torch.clamp(((timestep.float().to(self.log_sigmas.device) - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1)) low_idx = t.floor().long() high_idx = t.ceil().long() w = t.frac() log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() + return log_sigma.exp().to(timestep.device) def percent_to_sigma(self, percent): if percent <= 0.0: From c45d1b9b67a98c9ff9743b93caf8303286a430c3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 17:32:07 -0500 Subject: [PATCH 151/170] Add a function to load a unet from a state dict. 
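This splits load_unet() into a path-based wrapper and a new load_unet_state_dict() that works on an already-loaded state dict and returns None (rather than raising) when the model type cannot be detected. A usage sketch for callers that already hold a state dict, for example one extracted from a merge (the path below is hypothetical):

    import comfy.sd
    import comfy.utils

    sd = comfy.utils.load_torch_file("some_unet.safetensors")  # hypothetical path
    model_patcher = comfy.sd.load_unet_state_dict(sd)
    if model_patcher is None:
        raise RuntimeError("unsupported unet state dict")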
--- comfy/sd.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 7f85540c4..53c79e1c5 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -481,20 +481,18 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o return (model_patcher, clip, vae, clipvision) -def load_unet(unet_path): #load unet in diffusers format - sd = comfy.utils.load_torch_file(unet_path) +def load_unet_state_dict(sd): #load unet in diffusers format parameters = comfy.utils.calculate_parameters(sd) unet_dtype = model_management.unet_dtype(model_params=parameters) if "input_blocks.0.0.weight" in sd: #ldm model_config = model_detection.model_config_from_unet(sd, "", unet_dtype) if model_config is None: - raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) + return None new_sd = sd else: #diffusers model_config = model_detection.model_config_from_diffusers_unet(sd, unet_dtype) if model_config is None: - print("ERROR UNSUPPORTED UNET", unet_path) return None diffusers_keys = comfy.utils.unet_to_diffusers(model_config.unet_config) @@ -514,6 +512,14 @@ def load_unet(unet_path): #load unet in diffusers format print("left over keys in unet:", left_over) return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device) +def load_unet(unet_path): + sd = comfy.utils.load_torch_file(unet_path) + model = load_unet_state_dict(sd) + if model is None: + print("ERROR UNSUPPORTED UNET", unet_path) + raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) + return model + def save_checkpoint(output_path, model, clip, vae, metadata=None): model_management.load_models_gpu([model, clip.load_model()]) sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd()) From 798a34d009cd78f02bd4c0b30f1c9fd6a594d345 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 04:57:59 -0500 Subject: [PATCH 152/170] Lower compress level for image preview. --- nodes.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index df40f8094..8b4a9b119 100644 --- a/nodes.py +++ b/nodes.py @@ -1337,6 +1337,7 @@ class SaveImage: self.output_dir = folder_paths.get_output_directory() self.type = "output" self.prefix_append = "" + self.compress_level = 4 @classmethod def INPUT_TYPES(s): @@ -1370,7 +1371,7 @@ class SaveImage: metadata.add_text(x, json.dumps(extra_pnginfo[x])) file = f"{filename}_{counter:05}_.png" - img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level) results.append({ "filename": file, "subfolder": subfolder, @@ -1385,6 +1386,7 @@ class PreviewImage(SaveImage): self.output_dir = folder_paths.get_temp_directory() self.type = "temp" self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 1 @classmethod def INPUT_TYPES(s): From 983ebc579212e209f52dff014b79bfe1932c0959 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 04:58:32 -0500 Subject: [PATCH 153/170] Use smart model management for VAE to decrease latency. 
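Rather than hand-moving first_stage_model between the compute and offload devices on every decode/encode, the VAE is wrapped in a ModelPatcher once, and each call goes through model_management.load_models_gpu(); the shared model manager can then keep the VAE resident when memory allows instead of offloading it after every use, which is where the latency win comes from. The pattern, distilled from the diff below:

    # Wrap the module once at construction time...
    patcher = comfy.model_patcher.ModelPatcher(
        first_stage_model,
        load_device=model_management.vae_device(),
        offload_device=model_management.vae_offload_device(),
    )
    # ...then let the manager decide placement per call:
    model_management.load_models_gpu([patcher], memory_required=memory_used)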
--- comfy/sd.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 53c79e1c5..f4f84d0a0 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -187,10 +187,12 @@ class VAE: if device is None: device = model_management.vae_device() self.device = device - self.offload_device = model_management.vae_offload_device() + offload_device = model_management.vae_offload_device() self.vae_dtype = model_management.vae_dtype() self.first_stage_model.to(self.vae_dtype) + self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device) + def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16): steps = samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap) steps += samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap) @@ -219,10 +221,9 @@ class VAE: return samples def decode(self, samples_in): - self.first_stage_model = self.first_stage_model.to(self.device) try: memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype) - model_management.free_memory(memory_used, self.device) + model_management.load_models_gpu([self.patcher], memory_required=memory_used) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) @@ -235,22 +236,19 @@ class VAE: print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.") pixel_samples = self.decode_tiled_(samples_in) - self.first_stage_model = self.first_stage_model.to(self.offload_device) pixel_samples = pixel_samples.cpu().movedim(1,-1) return pixel_samples def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16): - self.first_stage_model = self.first_stage_model.to(self.device) + model_management.load_model_gpu(self.patcher) output = self.decode_tiled_(samples, tile_x, tile_y, overlap) - self.first_stage_model = self.first_stage_model.to(self.offload_device) return output.movedim(1,-1) def encode(self, pixel_samples): - self.first_stage_model = self.first_stage_model.to(self.device) pixel_samples = pixel_samples.movedim(-1,1) try: memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype) - model_management.free_memory(memory_used, self.device) + model_management.load_models_gpu([self.patcher], memory_required=memory_used) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) @@ -263,14 +261,12 @@ class VAE: print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.") samples = self.encode_tiled_(pixel_samples) - self.first_stage_model = self.first_stage_model.to(self.offload_device) return samples def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): - self.first_stage_model = self.first_stage_model.to(self.device) + model_management.load_model_gpu(self.patcher) pixel_samples = pixel_samples.movedim(-1,1) samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap) - self.first_stage_model = self.first_stage_model.to(self.offload_device) return samples def get_sd(self): From 21063fa35b53683f6ca01ccf1a5d5b509f702ba7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 11:01:05 -0500 Subject: [PATCH 154/170] Lower compress level of png sent on websocket. 
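Preview images sent over the websocket favor latency over file size, so the PNG encode drops from zlib level 4 to level 1. This is Pillow's standard compress_level knob (snippet illustrative only):

    from io import BytesIO
    from PIL import Image

    buf = BytesIO()
    # Lower compress_level = faster encode, slightly larger payload.
    Image.new("RGB", (64, 64)).save(buf, format="PNG", compress_level=1)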
--- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index 1a8e92b8f..9b1e3269d 100644 --- a/server.py +++ b/server.py @@ -576,7 +576,7 @@ class PromptServer(): bytesIO = BytesIO() header = struct.pack(">I", type_num) bytesIO.write(header) - image.save(bytesIO, format=image_type, quality=95, compress_level=4) + image.save(bytesIO, format=image_type, quality=95, compress_level=1) preview_bytes = bytesIO.getvalue() await self.send_bytes(BinaryEventTypes.PREVIEW_IMAGE, preview_bytes, sid=sid) From 57d7f4464f2a40521666cc8436711f73bf728a97 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 13:35:32 -0500 Subject: [PATCH 155/170] Add SDTurboScheduler node. --- comfy_extras/nodes_custom_sampler.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index d3c1d4a23..008d0b8d6 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -81,6 +81,25 @@ class PolyexponentialScheduler: sigmas = k_diffusion_sampling.get_sigmas_polyexponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) return (sigmas, ) +class SDTurboScheduler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "steps": ("INT", {"default": 1, "min": 1, "max": 10}), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "sampling/custom_sampling/schedulers" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, model, steps): + timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps] + sigmas = model.model.model_sampling.sigma(timesteps) + sigmas = torch.cat([sigmas, sigmas.new_zeros([1])]) + return (sigmas, ) + class VPScheduler: @classmethod def INPUT_TYPES(s): @@ -257,6 +276,7 @@ NODE_CLASS_MAPPINGS = { "ExponentialScheduler": ExponentialScheduler, "PolyexponentialScheduler": PolyexponentialScheduler, "VPScheduler": VPScheduler, + "SDTurboScheduler": SDTurboScheduler, "KSamplerSelect": KSamplerSelect, "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, "SamplerDPMPP_SDE": SamplerDPMPP_SDE, From b911eefc4278b6069390d01a6ac9010ae6eecbac Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 14:20:56 -0500 Subject: [PATCH 156/170] Limit gc.collect() to once every 10 seconds. 
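Running gc.collect() plus a cache flush after every prompt adds a fixed cost to short jobs, so the worker now throttles cleanup to at most once per 10-second window. The throttle pattern in isolation (a minimal sketch, not the exact worker loop):

    import gc
    import time

    GC_INTERVAL = 10.0
    last_gc_collect = 0.0

    def maybe_collect():
        global last_gc_collect
        now = time.perf_counter()
        if now - last_gc_collect > GC_INTERVAL:
            gc.collect()  # expensive full collection
            last_gc_collect = now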
--- main.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/main.py b/main.py index 1100a07f4..3997fbefc 100644 --- a/main.py +++ b/main.py @@ -88,6 +88,7 @@ def cuda_malloc_warning(): def prompt_worker(q, server): e = execution.PromptExecutor(server) + last_gc_collect = 0 while True: item, item_id = q.get() execution_start_time = time.perf_counter() @@ -97,9 +98,14 @@ def prompt_worker(q, server): if server.client_id is not None: server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) - print("Prompt executed in {:.2f} seconds".format(time.perf_counter() - execution_start_time)) - gc.collect() - comfy.model_management.soft_empty_cache() + current_time = time.perf_counter() + execution_time = current_time - execution_start_time + print("Prompt executed in {:.2f} seconds".format(execution_time)) + if (current_time - last_gc_collect) > 10.0: + gc.collect() + comfy.model_management.soft_empty_cache() + last_gc_collect = current_time + print("gc collect") async def run(server, address='', port=8188, verbose=True, call_on_start=None): await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop()) From 777f6b15225197898a5f49742682a2be859072d7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 14:45:00 -0500 Subject: [PATCH 157/170] Add to README that SDXL Turbo is supported. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 9d7e31790..af1f22811 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin - [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/) - [Model Merging](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/) - [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) +- [SDXL Turbo](https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/) - Latent previews with [TAESD](#how-to-show-high-quality-previews) - Starts up very fast. - Works fully offline: will never download anything. From 7f469203b7b4547f1d0f7113d18095334fa06a4d Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 30 Nov 2023 19:13:27 +0000 Subject: [PATCH 158/170] Group nodes (#1776) * setup ui unit tests * Refactoring, adding connections * Few tweaks * Fix type * Add general test * Refactored and extended test * move to describe * for groups * wip group nodes * Relink nodes Fixed widget values Convert to nodes * Reconnect on convert back * add via node menu + canvas refactor * Add ws event handling * fix using wrong node on widget serialize * allow reroute pipe fix control_after_generate configure * allow multiple images * Add test for converted widgets on missing nodes + fix crash * tidy * mores tests + refactor * throw earlier to get less confusing error * support outputs * more test * add ci action * use lts node * Fix? * Prevent connecting non matching combos * update * accidently removed npm i * Disable logging extension * fix naming allow control_after_generate custom name allow convert from reroutes * group node tests * Add executing info, custom node icon Tidy * internal reroute just works * Fix crash on virtual nodes e.g. 
note * Save group nodes to templates * Fix template nodes not being stored * Fix aborting convert * tidy * Fix reconnecting output links on convert to group * Fix links on convert to nodes * Handle missing internal nodes * Trigger callback on text change * Apply value on connect * Fix converted widgets not reconnecting * Group node updates - persist internal ids in current session - copy widget values when converting to nodes - fix issue serializing converted inputs * Resolve issue with sanitized node name * Fix internal id * allow outputs to be used internally and externally * order widgets on group node various fixes * fix imageupload widget requiring a specific name * groupnode imageupload test give widget unique name * Fix issue with external node links * Add VAE model * Fix internal node id check * fix potential crash * wip widget input support * more wip group widget inputs * Group node refactor Support for primitives/converted widgets * Fix convert to nodes with internal reroutes * fix applying primitive * Fix control widget values * fix test --- .vscode/settings.json | 9 + tests-ui/setup.js | 1 + tests-ui/tests/groupNode.test.js | 818 ++++++++++++++++++++ tests-ui/tests/widgetInputs.test.js | 4 +- tests-ui/utils/ezgraph.js | 46 +- tests-ui/utils/index.js | 60 +- tests-ui/utils/setup.js | 20 +- web/extensions/core/groupNode.js | 1054 ++++++++++++++++++++++++++ web/extensions/core/nodeTemplates.js | 57 +- web/extensions/core/widgetInputs.js | 225 +++--- web/scripts/app.js | 324 ++++---- web/scripts/domWidget.js | 3 +- web/scripts/ui.js | 8 +- web/scripts/widgets.js | 144 +++- 14 files changed, 2417 insertions(+), 356 deletions(-) create mode 100644 .vscode/settings.json create mode 100644 tests-ui/tests/groupNode.test.js create mode 100644 web/extensions/core/groupNode.js diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000..202121e10 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,9 @@ +{ + "path-intellisense.mappings": { + "../": "${workspaceFolder}/web/extensions/core" + }, + "[python]": { + "editor.defaultFormatter": "ms-python.autopep8" + }, + "python.formatting.provider": "none" +} diff --git a/tests-ui/setup.js b/tests-ui/setup.js index 0f368ab22..8bbd9dcdf 100644 --- a/tests-ui/setup.js +++ b/tests-ui/setup.js @@ -20,6 +20,7 @@ async function setup() { // Modify the response data to add some checkpoints const objectInfo = JSON.parse(data); objectInfo.CheckpointLoaderSimple.input.required.ckpt_name[0] = ["model1.safetensors", "model2.ckpt"]; + objectInfo.VAELoader.input.required.vae_name[0] = ["vae1.safetensors", "vae2.ckpt"]; data = JSON.stringify(objectInfo, undefined, "\t"); diff --git a/tests-ui/tests/groupNode.test.js b/tests-ui/tests/groupNode.test.js new file mode 100644 index 000000000..ce54c1154 --- /dev/null +++ b/tests-ui/tests/groupNode.test.js @@ -0,0 +1,818 @@ +// @ts-check +/// + +const { start, createDefaultWorkflow } = require("../utils"); +const lg = require("../utils/litegraph"); + +describe("group node", () => { + beforeEach(() => { + lg.setup(global); + }); + + afterEach(() => { + lg.teardown(global); + }); + + /** + * + * @param {*} app + * @param {*} graph + * @param {*} name + * @param {*} nodes + * @returns { Promise> } + */ + async function convertToGroup(app, graph, name, nodes) { + // Select the nodes we are converting + for (const n of nodes) { + n.select(true); + } + + expect(Object.keys(app.canvas.selected_nodes).sort((a, b) => +a - +b)).toEqual( + nodes.map((n) => n.id + "").sort((a, b) => +a - +b) 
+ ); + + global.prompt = jest.fn().mockImplementation(() => name); + const groupNode = await nodes[0].menu["Convert to Group Node"].call(false); + + // Check group name was requested + expect(window.prompt).toHaveBeenCalled(); + + // Ensure old nodes are removed + for (const n of nodes) { + expect(n.isRemoved).toBeTruthy(); + } + + expect(groupNode.type).toEqual("workflow/" + name); + + return graph.find(groupNode); + } + + /** + * @param { Record | number[] } idMap + * @param { Record> } valueMap + */ + function getOutput(idMap = {}, valueMap = {}) { + if (idMap instanceof Array) { + idMap = idMap.reduce((p, n) => { + p[n] = n + ""; + return p; + }, {}); + } + const expected = { + 1: { inputs: { ckpt_name: "model1.safetensors", ...valueMap?.[1] }, class_type: "CheckpointLoaderSimple" }, + 2: { inputs: { text: "positive", clip: ["1", 1], ...valueMap?.[2] }, class_type: "CLIPTextEncode" }, + 3: { inputs: { text: "negative", clip: ["1", 1], ...valueMap?.[3] }, class_type: "CLIPTextEncode" }, + 4: { inputs: { width: 512, height: 512, batch_size: 1, ...valueMap?.[4] }, class_type: "EmptyLatentImage" }, + 5: { + inputs: { + seed: 0, + steps: 20, + cfg: 8, + sampler_name: "euler", + scheduler: "normal", + denoise: 1, + model: ["1", 0], + positive: ["2", 0], + negative: ["3", 0], + latent_image: ["4", 0], + ...valueMap?.[5], + }, + class_type: "KSampler", + }, + 6: { inputs: { samples: ["5", 0], vae: ["1", 2], ...valueMap?.[6] }, class_type: "VAEDecode" }, + 7: { inputs: { filename_prefix: "ComfyUI", images: ["6", 0], ...valueMap?.[7] }, class_type: "SaveImage" }, + }; + + // Map old IDs to new at the top level + const mapped = {}; + for (const oldId in idMap) { + mapped[idMap[oldId]] = expected[oldId]; + delete expected[oldId]; + } + Object.assign(mapped, expected); + + // Map old IDs to new inside links + for (const k in mapped) { + for (const input in mapped[k].inputs) { + const v = mapped[k].inputs[input]; + if (v instanceof Array) { + if (v[0] in idMap) { + v[0] = idMap[v[0]] + ""; + } + } + } + } + + return mapped; + } + + test("can be created from selected nodes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg, nodes.empty]); + + // Ensure links are now to the group node + expect(group.inputs).toHaveLength(2); + expect(group.outputs).toHaveLength(3); + + expect(group.inputs.map((i) => i.input.name)).toEqual(["clip", "CLIPTextEncode clip"]); + expect(group.outputs.map((i) => i.output.name)).toEqual(["LATENT", "CONDITIONING", "CLIPTextEncode CONDITIONING"]); + + // ckpt clip to both clip inputs on the group + expect(nodes.ckpt.outputs.CLIP.connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [group.id, 0], + [group.id, 1], + ]); + + // group conditioning to sampler + expect(group.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 1], + ]); + // group conditioning 2 to sampler + expect( + group.outputs["CLIPTextEncode CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index]) + ).toEqual([[nodes.sampler.id, 2]]); + // group latent to sampler + expect(group.outputs["LATENT"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 3], + ]); + }); + + test("maintains all output links on conversion", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + 
const save2 = ez.SaveImage(...nodes.decode.outputs); + const save3 = ez.SaveImage(...nodes.decode.outputs); + // Ensure an output with multiple links maintains them on convert to group + const group = await convertToGroup(app, graph, "test", [nodes.sampler, nodes.decode]); + expect(group.outputs[0].connections.length).toBe(3); + expect(group.outputs[0].connections[0].targetNode.id).toBe(nodes.save.id); + expect(group.outputs[0].connections[1].targetNode.id).toBe(save2.id); + expect(group.outputs[0].connections[2].targetNode.id).toBe(save3.id); + + // and they're still linked when converting back to nodes + const newNodes = group.menu["Convert to nodes"].call(); + const decode = graph.find(newNodes.find((n) => n.type === "VAEDecode")); + expect(decode.outputs[0].connections.length).toBe(3); + expect(decode.outputs[0].connections[0].targetNode.id).toBe(nodes.save.id); + expect(decode.outputs[0].connections[1].targetNode.id).toBe(save2.id); + expect(decode.outputs[0].connections[2].targetNode.id).toBe(save3.id); + }); + test("can be be converted back to nodes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const toConvert = [nodes.pos, nodes.neg, nodes.empty, nodes.sampler]; + const group = await convertToGroup(app, graph, "test", toConvert); + + // Edit some values to ensure they are set back onto the converted nodes + expect(group.widgets["text"].value).toBe("positive"); + group.widgets["text"].value = "pos"; + expect(group.widgets["CLIPTextEncode text"].value).toBe("negative"); + group.widgets["CLIPTextEncode text"].value = "neg"; + expect(group.widgets["width"].value).toBe(512); + group.widgets["width"].value = 1024; + expect(group.widgets["sampler_name"].value).toBe("euler"); + group.widgets["sampler_name"].value = "ddim"; + expect(group.widgets["control_after_generate"].value).toBe("randomize"); + group.widgets["control_after_generate"].value = "fixed"; + + /** @type { Array } */ + group.menu["Convert to nodes"].call(); + + // ensure widget values are set + const pos = graph.find(nodes.pos.id); + expect(pos.node.type).toBe("CLIPTextEncode"); + expect(pos.widgets["text"].value).toBe("pos"); + const neg = graph.find(nodes.neg.id); + expect(neg.node.type).toBe("CLIPTextEncode"); + expect(neg.widgets["text"].value).toBe("neg"); + const empty = graph.find(nodes.empty.id); + expect(empty.node.type).toBe("EmptyLatentImage"); + expect(empty.widgets["width"].value).toBe(1024); + const sampler = graph.find(nodes.sampler.id); + expect(sampler.node.type).toBe("KSampler"); + expect(sampler.widgets["sampler_name"].value).toBe("ddim"); + expect(sampler.widgets["control_after_generate"].value).toBe("fixed"); + + // validate links + expect(nodes.ckpt.outputs.CLIP.connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [pos.id, 0], + [neg.id, 0], + ]); + + expect(pos.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 1], + ]); + + expect(neg.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 2], + ]); + + expect(empty.outputs["LATENT"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 3], + ]); + }); + test("it can embed reroutes as inputs", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + // Add and connect a reroute to the clip text encodes + const reroute = ez.Reroute(); + 
nodes.ckpt.outputs.CLIP.connectTo(reroute.inputs[0]); + reroute.outputs[0].connectTo(nodes.pos.inputs[0]); + reroute.outputs[0].connectTo(nodes.neg.inputs[0]); + + // Convert to group and ensure we only have 1 input of the correct type + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg, nodes.empty, reroute]); + expect(group.inputs).toHaveLength(1); + expect(group.inputs[0].input.type).toEqual("CLIP"); + + expect((await graph.toPrompt()).output).toEqual(getOutput()); + }); + test("it can embed reroutes as outputs", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + // Add a reroute with no output so we output IMAGE even though its used internally + const reroute = ez.Reroute(); + nodes.decode.outputs.IMAGE.connectTo(reroute.inputs[0]); + + // Convert to group and ensure there is an IMAGE output + const group = await convertToGroup(app, graph, "test", [nodes.decode, nodes.save, reroute]); + expect(group.outputs).toHaveLength(1); + expect(group.outputs[0].output.type).toEqual("IMAGE"); + expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.decode.id, nodes.save.id])); + }); + test("it can embed reroutes as pipes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + // Use reroutes as a pipe + const rerouteModel = ez.Reroute(); + const rerouteClip = ez.Reroute(); + const rerouteVae = ez.Reroute(); + nodes.ckpt.outputs.MODEL.connectTo(rerouteModel.inputs[0]); + nodes.ckpt.outputs.CLIP.connectTo(rerouteClip.inputs[0]); + nodes.ckpt.outputs.VAE.connectTo(rerouteVae.inputs[0]); + + const group = await convertToGroup(app, graph, "test", [rerouteModel, rerouteClip, rerouteVae]); + + expect(group.outputs).toHaveLength(3); + expect(group.outputs.map((o) => o.output.type)).toEqual(["MODEL", "CLIP", "VAE"]); + + expect(group.outputs).toHaveLength(3); + expect(group.outputs.map((o) => o.output.type)).toEqual(["MODEL", "CLIP", "VAE"]); + + group.outputs[0].connectTo(nodes.sampler.inputs.model); + group.outputs[1].connectTo(nodes.pos.inputs.clip); + group.outputs[1].connectTo(nodes.neg.inputs.clip); + }); + test("can handle reroutes used internally", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + let reroutes = []; + let prevNode = nodes.ckpt; + for(let i = 0; i < 5; i++) { + const reroute = ez.Reroute(); + prevNode.outputs[0].connectTo(reroute.inputs[0]); + prevNode = reroute; + reroutes.push(reroute); + } + prevNode.outputs[0].connectTo(nodes.sampler.inputs.model); + + const group = await convertToGroup(app, graph, "test", [...reroutes, ...Object.values(nodes)]); + expect((await graph.toPrompt()).output).toEqual(getOutput()); + + group.menu["Convert to nodes"].call(); + expect((await graph.toPrompt()).output).toEqual(getOutput()); + }); + test("creates with widget values from inner nodes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + nodes.ckpt.widgets.ckpt_name.value = "model2.ckpt"; + nodes.pos.widgets.text.value = "hello"; + nodes.neg.widgets.text.value = "world"; + nodes.empty.widgets.width.value = 256; + nodes.empty.widgets.height.value = 1024; + nodes.sampler.widgets.seed.value = 1; + nodes.sampler.widgets.control_after_generate.value = "increment"; + nodes.sampler.widgets.steps.value = 8; + nodes.sampler.widgets.cfg.value = 4.5; + nodes.sampler.widgets.sampler_name.value = "uni_pc"; + 
nodes.sampler.widgets.scheduler.value = "karras"; + nodes.sampler.widgets.denoise.value = 0.9; + + const group = await convertToGroup(app, graph, "test", [ + nodes.ckpt, + nodes.pos, + nodes.neg, + nodes.empty, + nodes.sampler, + ]); + + expect(group.widgets["ckpt_name"].value).toEqual("model2.ckpt"); + expect(group.widgets["text"].value).toEqual("hello"); + expect(group.widgets["CLIPTextEncode text"].value).toEqual("world"); + expect(group.widgets["width"].value).toEqual(256); + expect(group.widgets["height"].value).toEqual(1024); + expect(group.widgets["seed"].value).toEqual(1); + expect(group.widgets["control_after_generate"].value).toEqual("increment"); + expect(group.widgets["steps"].value).toEqual(8); + expect(group.widgets["cfg"].value).toEqual(4.5); + expect(group.widgets["sampler_name"].value).toEqual("uni_pc"); + expect(group.widgets["scheduler"].value).toEqual("karras"); + expect(group.widgets["denoise"].value).toEqual(0.9); + + expect((await graph.toPrompt()).output).toEqual( + getOutput([nodes.ckpt.id, nodes.pos.id, nodes.neg.id, nodes.empty.id, nodes.sampler.id], { + [nodes.ckpt.id]: { ckpt_name: "model2.ckpt" }, + [nodes.pos.id]: { text: "hello" }, + [nodes.neg.id]: { text: "world" }, + [nodes.empty.id]: { width: 256, height: 1024 }, + [nodes.sampler.id]: { + seed: 1, + steps: 8, + cfg: 4.5, + sampler_name: "uni_pc", + scheduler: "karras", + denoise: 0.9, + }, + }) + ); + }); + test("group inputs can be reroutes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + + const reroute = ez.Reroute(); + nodes.ckpt.outputs.CLIP.connectTo(reroute.inputs[0]); + + reroute.outputs[0].connectTo(group.inputs[0]); + reroute.outputs[0].connectTo(group.inputs[1]); + + expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.pos.id, nodes.neg.id])); + }); + test("group outputs can be reroutes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + + const reroute1 = ez.Reroute(); + const reroute2 = ez.Reroute(); + group.outputs[0].connectTo(reroute1.inputs[0]); + group.outputs[1].connectTo(reroute2.inputs[0]); + + reroute1.outputs[0].connectTo(nodes.sampler.inputs.positive); + reroute2.outputs[0].connectTo(nodes.sampler.inputs.negative); + + expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.pos.id, nodes.neg.id])); + }); + test("groups can connect to each other", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group1 = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + const group2 = await convertToGroup(app, graph, "test2", [nodes.empty, nodes.sampler]); + + group1.outputs[0].connectTo(group2.inputs["positive"]); + group1.outputs[1].connectTo(group2.inputs["negative"]); + + expect((await graph.toPrompt()).output).toEqual( + getOutput([nodes.pos.id, nodes.neg.id, nodes.empty.id, nodes.sampler.id]) + ); + }); + test("displays generated image on group node", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + let group = await convertToGroup(app, graph, "test", [ + nodes.pos, + nodes.neg, + nodes.empty, + nodes.sampler, + nodes.decode, + nodes.save, + ]); + + const { api } = require("../../web/scripts/api"); + + api.dispatchEvent(new 
CustomEvent("execution_start", {})); + api.dispatchEvent(new CustomEvent("executing", { detail: `${nodes.save.id}` })); + // Event should be forwarded to group node id + expect(+app.runningNodeId).toEqual(group.id); + expect(group.node["imgs"]).toBeFalsy(); + api.dispatchEvent( + new CustomEvent("executed", { + detail: { + node: `${nodes.save.id}`, + output: { + images: [ + { + filename: "test.png", + type: "output", + }, + ], + }, + }, + }) + ); + + // Trigger paint + group.node.onDrawBackground?.(app.canvas.ctx, app.canvas.canvas); + + expect(group.node["images"]).toEqual([ + { + filename: "test.png", + type: "output", + }, + ]); + + // Reload + const workflow = JSON.stringify((await graph.toPrompt()).workflow); + await app.loadGraphData(JSON.parse(workflow)); + group = graph.find(group); + + // Trigger inner nodes to get created + group.node["getInnerNodes"](); + + // Check it works for internal node ids + api.dispatchEvent(new CustomEvent("execution_start", {})); + api.dispatchEvent(new CustomEvent("executing", { detail: `${group.id}:5` })); + // Event should be forwarded to group node id + expect(+app.runningNodeId).toEqual(group.id); + expect(group.node["imgs"]).toBeFalsy(); + api.dispatchEvent( + new CustomEvent("executed", { + detail: { + node: `${group.id}:5`, + output: { + images: [ + { + filename: "test2.png", + type: "output", + }, + ], + }, + }, + }) + ); + + // Trigger paint + group.node.onDrawBackground?.(app.canvas.ctx, app.canvas.canvas); + + expect(group.node["images"]).toEqual([ + { + filename: "test2.png", + type: "output", + }, + ]); + }); + test("allows widgets to be converted to inputs", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + group.widgets[0].convertToInput(); + + const primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(group.inputs["text"]); + primitive.widgets[0].value = "hello"; + + expect((await graph.toPrompt()).output).toEqual( + getOutput([nodes.pos.id, nodes.neg.id], { + [nodes.pos.id]: { text: "hello" }, + }) + ); + }); + test("can be copied", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + const group1 = await convertToGroup(app, graph, "test", [ + nodes.pos, + nodes.neg, + nodes.empty, + nodes.sampler, + nodes.decode, + nodes.save, + ]); + + group1.widgets["text"].value = "hello"; + group1.widgets["width"].value = 256; + group1.widgets["seed"].value = 1; + + // Clone the node + group1.menu.Clone.call(); + expect(app.graph._nodes).toHaveLength(3); + const group2 = graph.find(app.graph._nodes[2]); + expect(group2.node.type).toEqual("workflow/test"); + expect(group2.id).not.toEqual(group1.id); + + // Reconnect ckpt + nodes.ckpt.outputs.MODEL.connectTo(group2.inputs["model"]); + nodes.ckpt.outputs.CLIP.connectTo(group2.inputs["clip"]); + nodes.ckpt.outputs.CLIP.connectTo(group2.inputs["CLIPTextEncode clip"]); + nodes.ckpt.outputs.VAE.connectTo(group2.inputs["vae"]); + + group2.widgets["text"].value = "world"; + group2.widgets["width"].value = 1024; + group2.widgets["seed"].value = 100; + + let i = 0; + expect((await graph.toPrompt()).output).toEqual({ + ...getOutput([nodes.empty.id, nodes.pos.id, nodes.neg.id, nodes.sampler.id, nodes.decode.id, nodes.save.id], { + [nodes.empty.id]: { width: 256 }, + [nodes.pos.id]: { text: "hello" }, + [nodes.sampler.id]: { seed: 1 }, + }), + ...getOutput( + { + [nodes.empty.id]: 
`${group2.id}:${i++}`, + [nodes.pos.id]: `${group2.id}:${i++}`, + [nodes.neg.id]: `${group2.id}:${i++}`, + [nodes.sampler.id]: `${group2.id}:${i++}`, + [nodes.decode.id]: `${group2.id}:${i++}`, + [nodes.save.id]: `${group2.id}:${i++}`, + }, + { + [nodes.empty.id]: { width: 1024 }, + [nodes.pos.id]: { text: "world" }, + [nodes.sampler.id]: { seed: 100 }, + } + ), + }); + + graph.arrange(); + }); + test("is embedded in workflow", async () => { + let { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + let group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + const workflow = JSON.stringify((await graph.toPrompt()).workflow); + + // Clear the environment + ({ ez, graph, app } = await start({ + resetEnv: true, + })); + // Ensure the node isnt registered + expect(() => ez["workflow/test"]).toThrow(); + + // Reload the workflow + await app.loadGraphData(JSON.parse(workflow)); + + // Ensure the node is found + group = graph.find(group); + + // Generate prompt and ensure it is as expected + expect((await graph.toPrompt()).output).toEqual( + getOutput({ + [nodes.pos.id]: `${group.id}:0`, + [nodes.neg.id]: `${group.id}:1`, + }) + ); + }); + test("shows missing node error on missing internal node when loading graph data", async () => { + const { graph } = await start(); + + const dialogShow = jest.spyOn(graph.app.ui.dialog, "show"); + await graph.app.loadGraphData({ + last_node_id: 3, + last_link_id: 1, + nodes: [ + { + id: 3, + type: "workflow/testerror", + }, + ], + links: [], + groups: [], + config: {}, + extra: { + groupNodes: { + testerror: { + nodes: [ + { + type: "NotKSampler", + }, + { + type: "NotVAEDecode", + }, + ], + }, + }, + }, + }); + + expect(dialogShow).toBeCalledTimes(1); + const call = dialogShow.mock.calls[0][0].innerHTML; + expect(call).toContain("the following node types were not found"); + expect(call).toContain("NotKSampler"); + expect(call).toContain("NotVAEDecode"); + expect(call).toContain("workflow/testerror"); + }); + test("maintains widget inputs on conversion back to nodes", async () => { + const { ez, graph, app } = await start(); + let pos = ez.CLIPTextEncode({ text: "positive" }); + pos.node.title = "Positive"; + let neg = ez.CLIPTextEncode({ text: "negative" }); + neg.node.title = "Negative"; + pos.widgets.text.convertToInput(); + neg.widgets.text.convertToInput(); + + let primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(pos.inputs.text); + primitive.outputs[0].connectTo(neg.inputs.text); + + const group = await convertToGroup(app, graph, "test", [pos, neg, primitive]); + // This will use a primitive widget named 'value' + expect(group.widgets.length).toBe(1); + expect(group.widgets["value"].value).toBe("positive"); + + const newNodes = group.menu["Convert to nodes"].call(); + pos = graph.find(newNodes.find((n) => n.title === "Positive")); + neg = graph.find(newNodes.find((n) => n.title === "Negative")); + primitive = graph.find(newNodes.find((n) => n.type === "PrimitiveNode")); + + expect(pos.inputs).toHaveLength(2); + expect(neg.inputs).toHaveLength(2); + expect(primitive.outputs[0].connections).toHaveLength(2); + + expect((await graph.toPrompt()).output).toEqual({ + 1: { inputs: { text: "positive" }, class_type: "CLIPTextEncode" }, + 2: { inputs: { text: "positive" }, class_type: "CLIPTextEncode" }, + }); + }); + test("adds widgets in node execution order", async () => { + const { ez, graph, app } = await start(); + const scale = ez.LatentUpscale(); + const save = ez.SaveImage(); + const 
empty = ez.EmptyLatentImage();
+		const decode = ez.VAEDecode();
+
+		scale.outputs.LATENT.connectTo(decode.inputs.samples);
+		decode.outputs.IMAGE.connectTo(save.inputs.images);
+		empty.outputs.LATENT.connectTo(scale.inputs.samples);
+
+		const group = await convertToGroup(app, graph, "test", [scale, save, empty, decode]);
+		const widgets = group.widgets.map((w) => w.widget.name);
+		expect(widgets).toStrictEqual([
+			"width",
+			"height",
+			"batch_size",
+			"upscale_method",
+			"LatentUpscale width",
+			"LatentUpscale height",
+			"crop",
+			"filename_prefix",
+		]);
+	});
+	test("adds output for external links when converting to group", async () => {
+		const { ez, graph, app } = await start();
+		const img = ez.EmptyLatentImage();
+		let decode = ez.VAEDecode(...img.outputs);
+		const preview1 = ez.PreviewImage(...decode.outputs);
+		const preview2 = ez.PreviewImage(...decode.outputs);
+
+		const group = await convertToGroup(app, graph, "test", [img, decode, preview1]);
+
+		// Ensure we have an output connected to the 2nd preview node
+		expect(group.outputs.length).toBe(1);
+		expect(group.outputs[0].connections.length).toBe(1);
+		expect(group.outputs[0].connections[0].targetNode.id).toBe(preview2.id);
+
+		// Convert back and ensure both previews are still connected
+		group.menu["Convert to nodes"].call();
+		decode = graph.find(decode);
+		expect(decode.outputs[0].connections.length).toBe(2);
+		expect(decode.outputs[0].connections[0].targetNode.id).toBe(preview1.id);
+		expect(decode.outputs[0].connections[1].targetNode.id).toBe(preview2.id);
+	});
+	test("adds output for external links when converting to group when nodes are not in execution order", async () => {
+		const { ez, graph, app } = await start();
+		const sampler = ez.KSampler();
+		const ckpt = ez.CheckpointLoaderSimple();
+		const empty = ez.EmptyLatentImage();
+		const pos = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "positive" });
+		const neg = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "negative" });
+		const decode1 = ez.VAEDecode(sampler.outputs.LATENT, ckpt.outputs.VAE);
+		const save = ez.SaveImage(decode1.outputs.IMAGE);
+		ckpt.outputs.MODEL.connectTo(sampler.inputs.model);
+		pos.outputs.CONDITIONING.connectTo(sampler.inputs.positive);
+		neg.outputs.CONDITIONING.connectTo(sampler.inputs.negative);
+		empty.outputs.LATENT.connectTo(sampler.inputs.latent_image);
+
+		const encode = ez.VAEEncode(decode1.outputs.IMAGE);
+		const vae = ez.VAELoader();
+		const decode2 = ez.VAEDecode(encode.outputs.LATENT, vae.outputs.VAE);
+		const preview = ez.PreviewImage(decode2.outputs.IMAGE);
+		vae.outputs.VAE.connectTo(encode.inputs.vae);
+
+		const group = await convertToGroup(app, graph, "test", [vae, decode1, encode, sampler]);
+
+		expect(group.outputs.length).toBe(3);
+		expect(group.outputs[0].output.name).toBe("VAE");
+		expect(group.outputs[0].output.type).toBe("VAE");
+		expect(group.outputs[1].output.name).toBe("IMAGE");
+		expect(group.outputs[1].output.type).toBe("IMAGE");
+		expect(group.outputs[2].output.name).toBe("LATENT");
+		expect(group.outputs[2].output.type).toBe("LATENT");
+
+		expect(group.outputs[0].connections.length).toBe(1);
+		expect(group.outputs[0].connections[0].targetNode.id).toBe(decode2.id);
+		expect(group.outputs[0].connections[0].targetInput.index).toBe(1);
+
+		expect(group.outputs[1].connections.length).toBe(1);
+		expect(group.outputs[1].connections[0].targetNode.id).toBe(save.id);
+		expect(group.outputs[1].connections[0].targetInput.index).toBe(0);
+
+		expect(group.outputs[2].connections.length).toBe(1);
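+		// The LATENT output should feed the second VAEDecode's "samples" input (slot 0)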
expect(group.outputs[2].connections[0].targetNode.id).toBe(decode2.id); + expect(group.outputs[2].connections[0].targetInput.index).toBe(0); + + expect((await graph.toPrompt()).output).toEqual({ + ...getOutput({ 1: ckpt.id, 2: pos.id, 3: neg.id, 4: empty.id, 5: sampler.id, 6: decode1.id, 7: save.id }), + [vae.id]: { inputs: { vae_name: "vae1.safetensors" }, class_type: vae.node.type }, + [encode.id]: { inputs: { pixels: ["6", 0], vae: [vae.id + "", 0] }, class_type: encode.node.type }, + [decode2.id]: { inputs: { samples: [encode.id + "", 0], vae: [vae.id + "", 0] }, class_type: decode2.node.type }, + [preview.id]: { inputs: { images: [decode2.id + "", 0] }, class_type: preview.node.type }, + }); + }); + test("works with IMAGEUPLOAD widget", async () => { + const { ez, graph, app } = await start(); + const img = ez.LoadImage(); + const preview1 = ez.PreviewImage(img.outputs[0]); + + const group = await convertToGroup(app, graph, "test", [img, preview1]); + const widget = group.widgets["upload"]; + expect(widget).toBeTruthy(); + expect(widget.widget.type).toBe("button"); + }); + test("internal primitive populates widgets for all linked inputs", async () => { + const { ez, graph, app } = await start(); + const img = ez.LoadImage(); + const scale1 = ez.ImageScale(img.outputs[0]); + const scale2 = ez.ImageScale(img.outputs[0]); + ez.PreviewImage(scale1.outputs[0]); + ez.PreviewImage(scale2.outputs[0]); + + scale1.widgets.width.convertToInput(); + scale2.widgets.height.convertToInput(); + + const primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(scale1.inputs.width); + primitive.outputs[0].connectTo(scale2.inputs.height); + + const group = await convertToGroup(app, graph, "test", [img, primitive, scale1, scale2]); + group.widgets.value.value = 100; + expect((await graph.toPrompt()).output).toEqual({ + 1: { + inputs: { image: img.widgets.image.value, upload: "image" }, + class_type: "LoadImage", + }, + 2: { + inputs: { upscale_method: "nearest-exact", width: 100, height: 512, crop: "disabled", image: ["1", 0] }, + class_type: "ImageScale", + }, + 3: { + inputs: { upscale_method: "nearest-exact", width: 512, height: 100, crop: "disabled", image: ["1", 0] }, + class_type: "ImageScale", + }, + 4: { inputs: { images: ["2", 0] }, class_type: "PreviewImage" }, + 5: { inputs: { images: ["3", 0] }, class_type: "PreviewImage" }, + }); + }); + test("primitive control widgets values are copied on convert", async () => { + const { ez, graph, app } = await start(); + const sampler = ez.KSampler(); + sampler.widgets.seed.convertToInput(); + sampler.widgets.sampler_name.convertToInput(); + + let p1 = ez.PrimitiveNode(); + let p2 = ez.PrimitiveNode(); + p1.outputs[0].connectTo(sampler.inputs.seed); + p2.outputs[0].connectTo(sampler.inputs.sampler_name); + + p1.widgets.control_after_generate.value = "increment"; + p2.widgets.control_after_generate.value = "decrement"; + p2.widgets.control_filter_list.value = "/.*/"; + + p2.node.title = "p2"; + + const group = await convertToGroup(app, graph, "test", [sampler, p1, p2]); + expect(group.widgets.control_after_generate.value).toBe("increment"); + expect(group.widgets["p2 control_after_generate"].value).toBe("decrement"); + expect(group.widgets["p2 control_filter_list"].value).toBe("/.*/"); + + group.widgets.control_after_generate.value = "fixed"; + group.widgets["p2 control_after_generate"].value = "randomize"; + group.widgets["p2 control_filter_list"].value = "/.+/"; + + group.menu["Convert to nodes"].call(); + p1 = graph.find(p1); + p2 = 
graph.find(p2); + + expect(p1.widgets.control_after_generate.value).toBe("fixed"); + expect(p2.widgets.control_after_generate.value).toBe("randomize"); + expect(p2.widgets.control_filter_list.value).toBe("/.+/"); + }); +}); diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js index e1873105a..8e191adf0 100644 --- a/tests-ui/tests/widgetInputs.test.js +++ b/tests-ui/tests/widgetInputs.test.js @@ -202,8 +202,8 @@ describe("widget inputs", () => { }); expect(dialogShow).toBeCalledTimes(1); - expect(dialogShow.mock.calls[0][0]).toContain("the following node types were not found"); - expect(dialogShow.mock.calls[0][0]).toContain("TestNode"); + expect(dialogShow.mock.calls[0][0].innerHTML).toContain("the following node types were not found"); + expect(dialogShow.mock.calls[0][0].innerHTML).toContain("TestNode"); }); test("defaultInput widgets can be converted back to inputs", async () => { diff --git a/tests-ui/utils/ezgraph.js b/tests-ui/utils/ezgraph.js index 0e81fd47b..898b82db0 100644 --- a/tests-ui/utils/ezgraph.js +++ b/tests-ui/utils/ezgraph.js @@ -150,7 +150,7 @@ export class EzNodeMenuItem { if (selectNode) { this.node.select(); } - this.item.callback.call(this.node.node, undefined, undefined, undefined, undefined, this.node.node); + return this.item.callback.call(this.node.node, undefined, undefined, undefined, undefined, this.node.node); } } @@ -240,8 +240,12 @@ export class EzNode { return this.#makeLookupArray(() => this.app.canvas.getNodeMenuOptions(this.node), "content", EzNodeMenuItem); } - select() { - this.app.canvas.selectNode(this.node); + get isRemoved() { + return !this.app.graph.getNodeById(this.id); + } + + select(addToSelection = false) { + this.app.canvas.selectNode(this.node, addToSelection); } // /** @@ -275,12 +279,17 @@ export class EzNode { if (!s) return p; const name = s[nameProperty]; + const item = new ctor(this, i, s); // @ts-ignore - if (!name || name in p) { - throw new Error(`Unable to store ${nodeProperty} ${name} on array as name conflicts.`); + p.push(item); + if (name) { + // @ts-ignore + if (name in p) { + throw new Error(`Unable to store ${nodeProperty} ${name} on array as name conflicts.`); + } } // @ts-ignore - p.push((p[name] = new ctor(this, i, s))); + p[name] = item; return p; }, Object.assign([], { $: this })); } @@ -348,6 +357,19 @@ export class EzGraph { }, 10); }); } + + /** + * @returns { Promise<{ + * workflow: {}, + * output: Record + * }>}> } + */ + toPrompt() { + // @ts-ignore + return this.app.graphToPrompt(); + } } export const Ez = { @@ -356,12 +378,12 @@ export const Ez = { * @example * const { ez, graph } = Ez.graph(app); * graph.clear(); - * const [model, clip, vae] = ez.CheckpointLoaderSimple(); - * const [pos] = ez.CLIPTextEncode(clip, { text: "positive" }); - * const [neg] = ez.CLIPTextEncode(clip, { text: "negative" }); - * const [latent] = ez.KSampler(model, pos, neg, ...ez.EmptyLatentImage()); - * const [image] = ez.VAEDecode(latent, vae); - * const saveNode = ez.SaveImage(image).node; + * const [model, clip, vae] = ez.CheckpointLoaderSimple().outputs; + * const [pos] = ez.CLIPTextEncode(clip, { text: "positive" }).outputs; + * const [neg] = ez.CLIPTextEncode(clip, { text: "negative" }).outputs; + * const [latent] = ez.KSampler(model, pos, neg, ...ez.EmptyLatentImage().outputs).outputs; + * const [image] = ez.VAEDecode(latent, vae).outputs; + * const saveNode = ez.SaveImage(image); * console.log(saveNode); * graph.arrange(); * @param { app } app diff --git a/tests-ui/utils/index.js 
b/tests-ui/utils/index.js index 01c58b21f..eeccdb3d9 100644 --- a/tests-ui/utils/index.js +++ b/tests-ui/utils/index.js @@ -1,21 +1,28 @@ const { mockApi } = require("./setup"); const { Ez } = require("./ezgraph"); +const lg = require("./litegraph"); /** * - * @param { Parameters[0] } config + * @param { Parameters[0] & { resetEnv?: boolean } } config * @returns */ export async function start(config = undefined) { + if(config?.resetEnv) { + jest.resetModules(); + jest.resetAllMocks(); + lg.setup(global); + } + mockApi(config); const { app } = require("../../web/scripts/app"); await app.setup(); - return Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]); + return { ...Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]), app }; } /** - * @param { ReturnType["graph"] } graph - * @param { (hasReloaded: boolean) => (Promise | void) } cb + * @param { ReturnType["graph"] } graph + * @param { (hasReloaded: boolean) => (Promise | void) } cb */ export async function checkBeforeAndAfterReload(graph, cb) { await cb(false); @@ -24,10 +31,10 @@ export async function checkBeforeAndAfterReload(graph, cb) { } /** - * @param { string } name - * @param { Record } input + * @param { string } name + * @param { Record } input * @param { (string | string[])[] | Record } output - * @returns { Record } + * @returns { Record } */ export function makeNodeDef(name, input, output = {}) { const nodeDef = { @@ -37,19 +44,19 @@ export function makeNodeDef(name, input, output = {}) { output_name: [], output_is_list: [], input: { - required: {} + required: {}, }, }; - for(const k in input) { + for (const k in input) { nodeDef.input.required[k] = typeof input[k] === "string" ? [input[k], {}] : [...input[k]]; } - if(output instanceof Array) { + if (output instanceof Array) { output = output.reduce((p, c) => { p[c] = c; return p; - }, {}) + }, {}); } - for(const k in output) { + for (const k in output) { nodeDef.output.push(output[k]); nodeDef.output_name.push(k); nodeDef.output_is_list.push(false); @@ -68,4 +75,31 @@ export function assertNotNullOrUndefined(x) { expect(x).not.toEqual(null); expect(x).not.toEqual(undefined); return true; -} \ No newline at end of file +} + +/** + * + * @param { ReturnType["ez"] } ez + * @param { ReturnType["graph"] } graph + */ +export function createDefaultWorkflow(ez, graph) { + graph.clear(); + const ckpt = ez.CheckpointLoaderSimple(); + + const pos = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "positive" }); + const neg = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "negative" }); + + const empty = ez.EmptyLatentImage(); + const sampler = ez.KSampler( + ckpt.outputs.MODEL, + pos.outputs.CONDITIONING, + neg.outputs.CONDITIONING, + empty.outputs.LATENT + ); + + const decode = ez.VAEDecode(sampler.outputs.LATENT, ckpt.outputs.VAE); + const save = ez.SaveImage(decode.outputs.IMAGE); + graph.arrange(); + + return { ckpt, pos, neg, empty, sampler, decode, save }; +} diff --git a/tests-ui/utils/setup.js b/tests-ui/utils/setup.js index 17e8ac1ad..dd150214a 100644 --- a/tests-ui/utils/setup.js +++ b/tests-ui/utils/setup.js @@ -30,16 +30,20 @@ export function mockApi({ mockExtensions, mockNodeDefs } = {}) { mockNodeDefs = JSON.parse(fs.readFileSync(path.resolve("./data/object_info.json"))); } + const events = new EventTarget(); + const mockApi = { + addEventListener: events.addEventListener.bind(events), + removeEventListener: events.removeEventListener.bind(events), + dispatchEvent: events.dispatchEvent.bind(events), + getSystemStats: jest.fn(), + getExtensions: jest.fn(() => 
mockExtensions),
+		getNodeDefs: jest.fn(() => mockNodeDefs),
+		init: jest.fn(),
+		apiURL: jest.fn((x) => "../../web/" + x),
+	};
 	jest.mock("../../web/scripts/api", () => ({
 		get api() {
-			return {
-				addEventListener: jest.fn(),
-				getSystemStats: jest.fn(),
-				getExtensions: jest.fn(() => mockExtensions),
-				getNodeDefs: jest.fn(() => mockNodeDefs),
-				init: jest.fn(),
-				apiURL: jest.fn((x) => "../../web/" + x),
-			};
+			return mockApi;
 		},
 	}));
 }
diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js
new file mode 100644
index 000000000..450b4f5f3
--- /dev/null
+++ b/web/extensions/core/groupNode.js
@@ -0,0 +1,1054 @@
+import { app } from "../../scripts/app.js";
+import { api } from "../../scripts/api.js";
+import { getWidgetType } from "../../scripts/widgets.js";
+import { mergeIfValid } from "./widgetInputs.js";
+
+const GROUP = Symbol();
+
+const Workflow = {
+	InUse: {
+		Free: 0,
+		Registered: 1,
+		InWorkflow: 2,
+	},
+	isInUseGroupNode(name) {
+		const id = `workflow/${name}`;
+		// Check if already registered/in use in this workflow
+		if (app.graph.extra?.groupNodes?.[name]) {
+			if (app.graph._nodes.find((n) => n.type === id)) {
+				return Workflow.InUse.InWorkflow;
+			} else {
+				return Workflow.InUse.Registered;
+			}
+		}
+		return Workflow.InUse.Free;
+	},
+	storeGroupNode(name, data) {
+		let extra = app.graph.extra;
+		if (!extra) app.graph.extra = extra = {};
+		let groupNodes = extra.groupNodes;
+		if (!groupNodes) extra.groupNodes = groupNodes = {};
+		groupNodes[name] = data;
+	},
+};
+
+class GroupNodeBuilder {
+	constructor(nodes) {
+		this.nodes = nodes;
+	}
+
+	build() {
+		const name = this.getName();
+		if (!name) return;
+
+		// Sort the nodes so they are in execution order
+		// this allows for widgets to be in the correct order when reconstructing
+		this.sortNodes();
+
+		this.nodeData = this.getNodeData();
+		Workflow.storeGroupNode(name, this.nodeData);
+
+		return { name, nodeData: this.nodeData };
+	}
+
+	getName() {
+		const name = prompt("Enter group name");
+		if (!name) return;
+		const used = Workflow.isInUseGroupNode(name);
+		switch (used) {
+			case Workflow.InUse.InWorkflow:
+				alert(
+					"A group node with this name is already in use in this workflow, please remove any instances or use a new name."
+				);
+				return;
+			case Workflow.InUse.Registered:
+				if (
+					!confirm(
+						"A group node with this name already exists embedded in this workflow, are you sure you want to overwrite it?"
+					)
+				) {
+					return;
+				}
+				break;
+		}
+		return name;
+	}
+
+	sortNodes() {
+		// Gets the builder's nodes in graph execution order
+		const nodesInOrder = app.graph.computeExecutionOrder(false);
+		this.nodes = this.nodes
+			.map((node) => ({ index: nodesInOrder.indexOf(node), node }))
+			.sort((a, b) => a.index - b.index || a.node.id - b.node.id)
+			.map(({ node }) => node);
+	}
+
+	getNodeData() {
+		const storeLinkTypes = (config) => {
+			// Store link types for dynamically typed nodes e.g. reroutes
+			for (const link of config.links) {
+				const origin = app.graph.getNodeById(link[4]);
+				const type = origin.outputs[link[1]].type;
+				link.push(type);
+			}
+		};
+
+		const storeExternalLinks = (config) => {
+			// Store any external links to the group in the config so when rebuilding we add extra slots
+			config.external = [];
+			for (let i = 0; i < this.nodes.length; i++) {
+				const node = this.nodes[i];
+				if (!node.outputs?.length) continue;
+				for (let slot = 0; slot < node.outputs.length; slot++) {
+					let hasExternal = false;
+					const output = node.outputs[slot];
+					let type = output.type;
+					if (!output.links?.length) continue;
+					for (const l of output.links) {
+						const link = app.graph.links[l];
+						if (!link) continue;
+						if (type === "*") type = link.type;
+
+						if (!app.canvas.selected_nodes[link.target_id]) {
+							hasExternal = true;
+							break;
+						}
+					}
+					if (hasExternal) {
+						config.external.push([i, slot, type]);
+					}
+				}
+			}
+		};
+
+		// Use the built-in copyToClipboard function to generate the node data we need
+		const backup = localStorage.getItem("litegrapheditor_clipboard");
+		try {
+			app.canvas.copyToClipboard(this.nodes);
+			const config = JSON.parse(localStorage.getItem("litegrapheditor_clipboard"));
+
+			storeLinkTypes(config);
+			storeExternalLinks(config);
+
+			return config;
+		} finally {
+			localStorage.setItem("litegrapheditor_clipboard", backup);
+		}
+	}
+}
+
+export class GroupNodeConfig {
+	constructor(name, nodeData) {
+		this.name = name;
+		this.nodeData = nodeData;
+		this.getLinks();
+
+		this.inputCount = 0;
+		this.oldToNewOutputMap = {};
+		this.newToOldOutputMap = {};
+		this.oldToNewInputMap = {};
+		this.oldToNewWidgetMap = {};
+		this.newToOldWidgetMap = {};
+		this.primitiveDefs = {};
+		this.widgetToPrimitive = {};
+		this.primitiveToWidget = {};
+	}
+
+	async registerType(source = "workflow") {
+		this.nodeDef = {
+			output: [],
+			output_name: [],
+			output_is_list: [],
+			name: source + "/" + this.name,
+			display_name: this.name,
+			category: "group nodes" + ("/" + source),
+			input: { required: {} },
+
+			[GROUP]: this,
+		};
+
+		this.inputs = [];
+		const seenInputs = {};
+		const seenOutputs = {};
+		for (let i = 0; i < this.nodeData.nodes.length; i++) {
+			const node = this.nodeData.nodes[i];
+			node.index = i;
+			this.processNode(node, seenInputs, seenOutputs);
+		}
+		await app.registerNodeDef("workflow/" + this.name, this.nodeDef);
+	}
+
+	getLinks() {
+		this.linksFrom = {};
+		this.linksTo = {};
+		this.externalFrom = {};
+
+		// Extract links for easy lookup
+		for (const l of this.nodeData.links) {
+			const [sourceNodeId, sourceNodeSlot, targetNodeId, targetNodeSlot] = l;
+
+			// Skip links outside the copy config
+			if (sourceNodeId == null) continue;
+
+			if (!this.linksFrom[sourceNodeId]) {
+				this.linksFrom[sourceNodeId] = {};
+			}
+			this.linksFrom[sourceNodeId][sourceNodeSlot] = l;
+
+			if (!this.linksTo[targetNodeId]) {
+				this.linksTo[targetNodeId] = {};
+			}
+			this.linksTo[targetNodeId][targetNodeSlot] = l;
+		}
+
+		if (this.nodeData.external) {
+			for (const ext of this.nodeData.external) {
+				if (!this.externalFrom[ext[0]]) {
+					this.externalFrom[ext[0]] = { [ext[1]]: ext[2] };
+				} else {
+					this.externalFrom[ext[0]][ext[1]] = ext[2];
+				}
+			}
+		}
+	}
+
+	processNode(node, seenInputs, seenOutputs) {
+		const def = this.getNodeDef(node);
+		if (!def) return;
+
+		const inputs = { ...def.input?.required, ...def.input?.optional };
+
+		this.inputs.push(this.processNodeInputs(node, seenInputs, inputs));
+		if (def.output?.length) this.processNodeOutputs(node, seenOutputs, def);
+	}
+
+	getNodeDef(node) {
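+		// Resolve a definition for an inner node: real node types come from globalDefs,
+		// while virtual nodes (PrimitiveNode, Reroute) get a def synthesized from their link types below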
+ const def = globalDefs[node.type]; + if (def) return def; + + const linksFrom = this.linksFrom[node.index]; + if (node.type === "PrimitiveNode") { + // Skip as its not linked + if (!linksFrom) return; + + let type = linksFrom["0"][5]; + if (type === "COMBO") { + // Use the array items + const source = node.outputs[0].widget.name; + const fromTypeName = this.nodeData.nodes[linksFrom["0"][2]].type; + const fromType = globalDefs[fromTypeName]; + const input = fromType.input.required[source] ?? fromType.input.optional[source]; + type = input[0]; + } + + const def = (this.primitiveDefs[node.index] = { + input: { + required: { + value: [type, {}], + }, + }, + output: [type], + output_name: [], + output_is_list: [], + }); + return def; + } else if (node.type === "Reroute") { + const linksTo = this.linksTo[node.index]; + if (linksTo && linksFrom && !this.externalFrom[node.index]?.[0]) { + // Being used internally + return null; + } + + let rerouteType = "*"; + if (linksFrom) { + const [, , id, slot] = linksFrom["0"]; + rerouteType = this.nodeData.nodes[id].inputs[slot].type; + } else if (linksTo) { + const [id, slot] = linksTo["0"]; + rerouteType = this.nodeData.nodes[id].outputs[slot].type; + } else { + // Reroute used as a pipe + for (const l of this.nodeData.links) { + if (l[2] === node.index) { + rerouteType = l[5]; + break; + } + } + if (rerouteType === "*") { + // Check for an external link + const t = this.externalFrom[node.index]?.[0]; + if (t) { + rerouteType = t; + } + } + } + + return { + input: { + required: { + [rerouteType]: [rerouteType, {}], + }, + }, + output: [rerouteType], + output_name: [], + output_is_list: [], + }; + } + + console.warn("Skipping virtual node " + node.type + " when building group node " + this.name); + } + + getInputConfig(node, inputName, seenInputs, config, extra) { + let name = node.inputs?.find((inp) => inp.name === inputName)?.label ?? inputName; + let prefix = ""; + // Special handling for primitive to include the title if it is set rather than just "value" + if ((node.type === "PrimitiveNode" && node.title) || name in seenInputs) { + prefix = `${node.title ?? node.type} `; + name = `${prefix}${inputName}`; + if (name in seenInputs) { + name = `${prefix}${seenInputs[name]} ${inputName}`; + } + } + seenInputs[name] = (seenInputs[name] ?? 1) + 1; + + if (inputName === "seed" || inputName === "noise_seed") { + if (!extra) extra = {}; + extra.control_after_generate = `${prefix}control_after_generate`; + } + if (config[0] === "IMAGEUPLOAD") { + if (!extra) extra = {}; + extra.widget = `${prefix}${config[1]?.widget ?? 
"image"}`; + } + + if (extra) { + config = [config[0], { ...config[1], ...extra }]; + } + + return { name, config }; + } + + processWidgetInputs(inputs, node, inputNames, seenInputs) { + const slots = []; + const converted = new Map(); + const widgetMap = (this.oldToNewWidgetMap[node.index] = {}); + for (const inputName of inputNames) { + let widgetType = getWidgetType(inputs[inputName], inputName); + if (widgetType) { + const convertedIndex = node.inputs?.findIndex( + (inp) => inp.name === inputName && inp.widget?.name === inputName + ); + if (convertedIndex > -1) { + // This widget has been converted to a widget + // We need to store this in the correct position so link ids line up + converted.set(convertedIndex, inputName); + widgetMap[inputName] = null; + } else { + // Normal widget + const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]); + this.nodeDef.input.required[name] = config; + widgetMap[inputName] = name; + this.newToOldWidgetMap[name] = { node, inputName }; + } + } else { + // Normal input + slots.push(inputName); + } + } + return { converted, slots }; + } + + checkPrimitiveConnection(link, inputName, inputs) { + const sourceNode = this.nodeData.nodes[link[0]]; + if (sourceNode.type === "PrimitiveNode") { + // Merge link configurations + const [sourceNodeId, _, targetNodeId, __] = link; + const primitiveDef = this.primitiveDefs[sourceNodeId]; + const targetWidget = inputs[inputName]; + const primitiveConfig = primitiveDef.input.required.value; + const output = { widget: primitiveConfig }; + const config = mergeIfValid(output, targetWidget, false, null, primitiveConfig); + primitiveConfig[1] = config?.customConfig ?? inputs[inputName][1] ? { ...inputs[inputName][1] } : {}; + + let name = this.oldToNewWidgetMap[sourceNodeId]["value"]; + name = name.substr(0, name.length - 6); + primitiveConfig[1].control_after_generate = true; + primitiveConfig[1].control_prefix = name; + + let toPrimitive = this.widgetToPrimitive[targetNodeId]; + if (!toPrimitive) { + toPrimitive = this.widgetToPrimitive[targetNodeId] = {}; + } + if (toPrimitive[inputName]) { + toPrimitive[inputName].push(sourceNodeId); + } + toPrimitive[inputName] = sourceNodeId; + + let toWidget = this.primitiveToWidget[sourceNodeId]; + if (!toWidget) { + toWidget = this.primitiveToWidget[sourceNodeId] = []; + } + toWidget.push({ nodeId: targetNodeId, inputName }); + } + } + + processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs) { + for (let i = 0; i < slots.length; i++) { + const inputName = slots[i]; + if (linksTo[i]) { + this.checkPrimitiveConnection(linksTo[i], inputName, inputs); + // This input is linked so we can skip it + continue; + } + + const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]); + this.nodeDef.input.required[name] = config; + inputMap[i] = this.inputCount++; + } + } + + processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs) { + // Add converted widgets sorted into their index order (ordered as they were converted) so link ids match up + const convertedSlots = [...converted.keys()].sort().map((k) => converted.get(k)); + for (let i = 0; i < convertedSlots.length; i++) { + const inputName = convertedSlots[i]; + if (linksTo[slots.length + i]) { + this.checkPrimitiveConnection(linksTo[slots.length + i], inputName, inputs); + // This input is linked so we can skip it + continue; + } + + const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName], { + 
defaultInput: true, + }); + this.nodeDef.input.required[name] = config; + inputMap[slots.length + i] = this.inputCount++; + } + } + + processNodeInputs(node, seenInputs, inputs) { + const inputMapping = []; + + const inputNames = Object.keys(inputs); + if (!inputNames.length) return; + + const { converted, slots } = this.processWidgetInputs(inputs, node, inputNames, seenInputs); + const linksTo = this.linksTo[node.index] ?? {}; + const inputMap = (this.oldToNewInputMap[node.index] = {}); + this.processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs); + this.processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs); + + return inputMapping; + } + + processNodeOutputs(node, seenOutputs, def) { + const oldToNew = (this.oldToNewOutputMap[node.index] = {}); + + // Add outputs + for (let outputId = 0; outputId < def.output.length; outputId++) { + const linksFrom = this.linksFrom[node.index]; + if (linksFrom?.[outputId] && !this.externalFrom[node.index]?.[outputId]) { + // This output is linked internally so we can skip it + continue; + } + + oldToNew[outputId] = this.nodeDef.output.length; + this.newToOldOutputMap[this.nodeDef.output.length] = { node, slot: outputId }; + this.nodeDef.output.push(def.output[outputId]); + this.nodeDef.output_is_list.push(def.output_is_list[outputId]); + + let label = def.output_name?.[outputId] ?? def.output[outputId]; + const output = node.outputs.find((o) => o.name === label); + if (output?.label) { + label = output.label; + } + let name = label; + if (name in seenOutputs) { + const prefix = `${node.title ?? node.type} `; + name = `${prefix}${label}`; + if (name in seenOutputs) { + name = `${prefix}${node.index} ${label}`; + } + } + seenOutputs[name] = 1; + + this.nodeDef.output_name.push(name); + } + } + + static async registerFromWorkflow(groupNodes, missingNodeTypes) { + for (const g in groupNodes) { + const groupData = groupNodes[g]; + + let hasMissing = false; + for (const n of groupData.nodes) { + // Find missing node types + if (!(n.type in LiteGraph.registered_node_types)) { + missingNodeTypes.push(n.type); + hasMissing = true; + } + } + + if (hasMissing) continue; + + const config = new GroupNodeConfig(g, groupData); + await config.registerType(); + } + } +} + +export class GroupNodeHandler { + node; + groupData; + + constructor(node) { + this.node = node; + this.groupData = node.constructor?.nodeData?.[GROUP]; + + this.node.setInnerNodes = (innerNodes) => { + this.innerNodes = innerNodes; + + for (let innerNodeIndex = 0; innerNodeIndex < this.innerNodes.length; innerNodeIndex++) { + const innerNode = this.innerNodes[innerNodeIndex]; + + for (const w of innerNode.widgets ?? 
[]) { + if (w.type === "converted-widget") { + w.serializeValue = w.origSerializeValue; + } + } + + innerNode.index = innerNodeIndex; + innerNode.getInputNode = (slot) => { + // Check if this input is internal or external + const externalSlot = this.groupData.oldToNewInputMap[innerNode.index]?.[slot]; + if (externalSlot != null) { + return this.node.getInputNode(externalSlot); + } + + // Internal link + const innerLink = this.groupData.linksTo[innerNode.index]?.[slot]; + if (!innerLink) return null; + + const inputNode = innerNodes[innerLink[0]]; + // Primitives will already apply their values + if (inputNode.type === "PrimitiveNode") return null; + + return inputNode; + }; + + innerNode.getInputLink = (slot) => { + const externalSlot = this.groupData.oldToNewInputMap[innerNode.index]?.[slot]; + if (externalSlot != null) { + // The inner node is connected via the group node inputs + const linkId = this.node.inputs[externalSlot].link; + let link = app.graph.links[linkId]; + + // Use the outer link, but update the target to the inner node + link = { + ...link, + target_id: innerNode.id, + target_slot: +slot, + }; + return link; + } + + let link = this.groupData.linksTo[innerNode.index]?.[slot]; + if (!link) return null; + // Use the inner link, but update the origin node to be inner node id + link = { + origin_id: innerNodes[link[0]].id, + origin_slot: link[1], + target_id: innerNode.id, + target_slot: +slot, + }; + return link; + }; + } + }; + + this.node.updateLink = (link) => { + // Replace the group node reference with the internal node + link = { ...link }; + const output = this.groupData.newToOldOutputMap[link.origin_slot]; + let innerNode = this.innerNodes[output.node.index]; + let l; + while (innerNode.type === "Reroute") { + l = innerNode.getInputLink(0); + innerNode = innerNode.getInputNode(0); + } + + link.origin_id = innerNode.id; + link.origin_slot = l?.origin_slot ?? output.slot; + return link; + }; + + this.node.getInnerNodes = () => { + if (!this.innerNodes) { + this.node.setInnerNodes( + this.groupData.nodeData.nodes.map((n, i) => { + const innerNode = LiteGraph.createNode(n.type); + innerNode.configure(n); + innerNode.id = `${this.node.id}:${i}`; + return innerNode; + }) + ); + } + + this.updateInnerWidgets(); + + return this.innerNodes; + }; + + this.node.convertToNodes = () => { + const addInnerNodes = () => { + const backup = localStorage.getItem("litegrapheditor_clipboard"); + // Clone the node data so we dont mutate it for other nodes + const c = { ...this.groupData.nodeData }; + c.nodes = [...c.nodes]; + const innerNodes = this.node.getInnerNodes(); + let ids = []; + for (let i = 0; i < c.nodes.length; i++) { + let id = innerNodes?.[i]?.id; + // Use existing IDs if they are set on the inner nodes + if (id == null || isNaN(id)) { + id = undefined; + } else { + ids.push(id); + } + c.nodes[i] = { ...c.nodes[i], id }; + } + localStorage.setItem("litegrapheditor_clipboard", JSON.stringify(c)); + app.canvas.pasteFromClipboard(); + localStorage.setItem("litegrapheditor_clipboard", backup); + + const [x, y] = this.node.pos; + let top; + let left; + // Configure nodes with current widget data + const selectedIds = ids.length ? 
ids : Object.keys(app.canvas.selected_nodes);
+				const newNodes = [];
+				for (let i = 0; i < selectedIds.length; i++) {
+					const id = selectedIds[i];
+					const newNode = app.graph.getNodeById(id);
+					const innerNode = innerNodes[i];
+					newNodes.push(newNode);
+
+					if (left == null || newNode.pos[0] < left) {
+						left = newNode.pos[0];
+					}
+					if (top == null || newNode.pos[1] < top) {
+						top = newNode.pos[1];
+					}
+
+					const map = this.groupData.oldToNewWidgetMap[innerNode.index];
+					if (map) {
+						const widgets = Object.keys(map);
+
+						for (const oldName of widgets) {
+							const newName = map[oldName];
+							if (!newName) continue;
+
+							const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName);
+							if (widgetIndex === -1) continue;
+
+							// Populate the main and any linked widgets
+							if (innerNode.type === "PrimitiveNode") {
+								for (let i = 0; i < newNode.widgets.length; i++) {
+									newNode.widgets[i].value = this.node.widgets[widgetIndex + i].value;
+								}
+							} else {
+								const outerWidget = this.node.widgets[widgetIndex];
+								const newWidget = newNode.widgets.find((w) => w.name === oldName);
+								if (!newWidget) continue;
+
+								newWidget.value = outerWidget.value;
+								for (let w = 0; w < outerWidget.linkedWidgets?.length; w++) {
+									newWidget.linkedWidgets[w].value = outerWidget.linkedWidgets[w].value;
+								}
+							}
+						}
+					}
+				}
+
+				// Shift each node
+				for (const newNode of newNodes) {
+					newNode.pos = [newNode.pos[0] - (left - x), newNode.pos[1] - (top - y)];
+				}
+
+				return { newNodes, selectedIds };
+			};
+
+			const reconnectInputs = (selectedIds) => {
+				for (const innerNodeIndex in this.groupData.oldToNewInputMap) {
+					const id = selectedIds[innerNodeIndex];
+					const newNode = app.graph.getNodeById(id);
+					const map = this.groupData.oldToNewInputMap[innerNodeIndex];
+					for (const innerInputId in map) {
+						const groupSlotId = map[innerInputId];
+						if (groupSlotId == null) continue;
+						const slot = node.inputs[groupSlotId];
+						if (slot.link == null) continue;
+						const link = app.graph.links[slot.link];
+						// connect this node output to the input of another node
+						const originNode = app.graph.getNodeById(link.origin_id);
+						originNode.connect(link.origin_slot, newNode, +innerInputId);
+					}
+				}
+			};
+
+			const reconnectOutputs = (selectedIds) => {
+				for (let groupOutputId = 0; groupOutputId < node.outputs?.length; groupOutputId++) {
+					const output = node.outputs[groupOutputId];
+					if (!output.links) continue;
+					const links = [...output.links];
+					for (const l of links) {
+						const slot = this.groupData.newToOldOutputMap[groupOutputId];
+						const link = app.graph.links[l];
+						const targetNode = app.graph.getNodeById(link.target_id);
+						const newNode = app.graph.getNodeById(selectedIds[slot.node.index]);
+						newNode.connect(slot.slot, targetNode, link.target_slot);
+					}
+				}
+			};
+
+			const { newNodes, selectedIds } = addInnerNodes();
+			reconnectInputs(selectedIds);
+			reconnectOutputs(selectedIds);
+			app.graph.remove(this.node);
+
+			return newNodes;
+		};
+
+		const getExtraMenuOptions = this.node.getExtraMenuOptions;
+		this.node.getExtraMenuOptions = function (_, options) {
+			getExtraMenuOptions?.apply(this, arguments);
+
+			let optionIndex = options.findIndex((o) => o.content === "Outputs");
+			if (optionIndex === -1) optionIndex = options.length;
+			else optionIndex++;
+			options.splice(optionIndex, 0, null, {
+				content: "Convert to nodes",
+				callback: () => {
+					return this.convertToNodes();
+				},
+			});
+		};
+
+		// Draw a custom collapse icon to identify this as a group
+		const onDrawTitleBox = this.node.onDrawTitleBox;
+		this.node.onDrawTitleBox = function (ctx, height,
size, scale) { + onDrawTitleBox?.apply(this, arguments); + + const fill = ctx.fillStyle; + ctx.beginPath(); + ctx.rect(11, -height + 11, 2, 2); + ctx.rect(14, -height + 11, 2, 2); + ctx.rect(17, -height + 11, 2, 2); + ctx.rect(11, -height + 14, 2, 2); + ctx.rect(14, -height + 14, 2, 2); + ctx.rect(17, -height + 14, 2, 2); + ctx.rect(11, -height + 17, 2, 2); + ctx.rect(14, -height + 17, 2, 2); + ctx.rect(17, -height + 17, 2, 2); + + ctx.fillStyle = this.boxcolor || LiteGraph.NODE_DEFAULT_BOXCOLOR; + ctx.fill(); + ctx.fillStyle = fill; + }; + + // Draw progress label + const onDrawForeground = node.onDrawForeground; + const groupData = this.groupData.nodeData; + node.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + if (+app.runningNodeId === this.id && this.runningInternalNodeId !== null) { + const n = groupData.nodes[this.runningInternalNodeId]; + const message = `Running ${n.title || n.type} (${this.runningInternalNodeId}/${groupData.nodes.length})`; + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(message); + ctx.fillStyle = node.boxcolor || LiteGraph.NODE_DEFAULT_BOXCOLOR; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + ctx.fillStyle = "#fff"; + ctx.fillText(message, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + } + }; + + // Flag this node as needing to be reset + const onExecutionStart = this.node.onExecutionStart; + this.node.onExecutionStart = function () { + this.resetExecution = true; + return onExecutionStart?.apply(this, arguments); + }; + + function handleEvent(type, getId, getEvent) { + const handler = ({ detail }) => { + const id = getId(detail); + if (!id) return; + const node = app.graph.getNodeById(id); + if (node) return; + + const innerNodeIndex = this.innerNodes?.findIndex((n) => n.id == id); + if (innerNodeIndex > -1) { + this.node.runningInternalNodeId = innerNodeIndex; + api.dispatchEvent(new CustomEvent(type, { detail: getEvent(detail, this.node.id + "", this.node) })); + } + }; + api.addEventListener(type, handler); + return handler; + } + + const executing = handleEvent.call( + this, + "executing", + (d) => d, + (d, id, node) => id + ); + + const executed = handleEvent.call( + this, + "executed", + (d) => d?.node, + (d, id, node) => ({ ...d, node: id, merge: !node.resetExecution }) + ); + + const onRemoved = node.onRemoved; + this.node.onRemoved = function () { + onRemoved?.apply(this, arguments); + api.removeEventListener("executing", executing); + api.removeEventListener("executed", executed); + }; + } + + updateInnerWidgets() { + for (const newWidgetName in this.groupData.newToOldWidgetMap) { + const newWidget = this.node.widgets.find((w) => w.name === newWidgetName); + if (!newWidget) continue; + + const newValue = newWidget.value; + const old = this.groupData.newToOldWidgetMap[newWidgetName]; + let innerNode = this.innerNodes[old.node.index]; + + if (innerNode.type === "PrimitiveNode") { + innerNode.primitiveValue = newValue; + const primitiveLinked = this.groupData.primitiveToWidget[old.node.index]; + for (const linked of primitiveLinked) { + const node = this.innerNodes[linked.nodeId]; + const widget = node.widgets.find((w) => w.name === linked.inputName); + + if (widget) { + widget.value = newValue; + } + } + continue; + } + + const widget = innerNode.widgets?.find((w) => w.name === old.inputName); + if (widget) { + widget.value = newValue; + } + } + } + + populatePrimitive(node, nodeId, oldName, i, linkedShift) 
{
+		// Converted widget, populate primitive if linked
+		const primitiveId = this.groupData.widgetToPrimitive[nodeId]?.[oldName];
+		if (primitiveId == null) return;
+		const targetWidgetName = this.groupData.oldToNewWidgetMap[primitiveId]["value"];
+		const targetWidgetIndex = this.node.widgets.findIndex((w) => w.name === targetWidgetName);
+		if (targetWidgetIndex > -1) {
+			const primitiveNode = this.innerNodes[primitiveId];
+			let len = primitiveNode.widgets.length;
+			if (len - 1 !== this.node.widgets[targetWidgetIndex].linkedWidgets?.length) {
+				// Fallback handling in case the primitive has a different number of widgets;
+				// we don't want to overwrite random widgets, better to leave them blank
+				len = 1;
+			}
+			for (let i = 0; i < len; i++) {
+				this.node.widgets[targetWidgetIndex + i].value = primitiveNode.widgets[i].value;
+			}
+		}
+	}
+
+	populateWidgets() {
+		for (let nodeId = 0; nodeId < this.groupData.nodeData.nodes.length; nodeId++) {
+			const node = this.groupData.nodeData.nodes[nodeId];
+
+			if (!node.widgets_values?.length) continue;
+
+			const map = this.groupData.oldToNewWidgetMap[nodeId];
+			const widgets = Object.keys(map);
+
+			let linkedShift = 0;
+			for (let i = 0; i < widgets.length; i++) {
+				const oldName = widgets[i];
+				const newName = map[oldName];
+				const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName);
+				const mainWidget = this.node.widgets[widgetIndex];
+				if (!newName) {
+					// New name will be null if it's a converted widget
+					this.populatePrimitive(node, nodeId, oldName, i, linkedShift);
+
+					// Find the inner widget and shift by the number of linked widgets as they will have been removed too
+					const innerWidget = this.innerNodes[nodeId].widgets?.find((w) => w.name === oldName);
+					linkedShift += innerWidget.linkedWidgets?.length ?? 0;
+					continue;
+				}
+
+				if (widgetIndex === -1) {
+					continue;
+				}
+
+				// Populate the main and any linked widget
+				mainWidget.value = node.widgets_values[i + linkedShift];
+				for (let w = 0; w < mainWidget.linkedWidgets?.length; w++) {
+					this.node.widgets[widgetIndex + w + 1].value = node.widgets_values[i + ++linkedShift];
+				}
+			}
+		}
+	}
+
+	replaceNodes(nodes) {
+		let top;
+		let left;
+
+		for (let i = 0; i < nodes.length; i++) {
+			const node = nodes[i];
+			if (left == null || node.pos[0] < left) {
+				left = node.pos[0];
+			}
+			if (top == null || node.pos[1] < top) {
+				top = node.pos[1];
+			}
+
+			this.linkOutputs(node, i);
+			app.graph.remove(node);
+		}
+
+		this.linkInputs();
+		this.node.pos = [left, top];
+	}
+
+	linkOutputs(originalNode, nodeId) {
+		if (!originalNode.outputs) return;
+
+		for (const output of originalNode.outputs) {
+			if (!output.links) continue;
+			// Clone the links as they'll be changed if we reconnect
+			const links = [...output.links];
+			for (const l of links) {
+				const link = app.graph.links[l];
+				if (!link) continue;
+
+				const targetNode = app.graph.getNodeById(link.target_id);
+				const newSlot = this.groupData.oldToNewOutputMap[nodeId]?.[link.origin_slot];
+				if (newSlot != null) {
+					this.node.connect(newSlot, targetNode, link.target_slot);
+				}
+			}
+		}
+	}
+
+	linkInputs() {
+		for (const link of this.groupData.nodeData.links ?? []) {
+			const [, originSlot, targetId, targetSlot, actualOriginId] = link;
+			const originNode = app.graph.getNodeById(actualOriginId);
+			if (!originNode) continue; // this node is in the group
+			originNode.connect(originSlot, this.node.id, this.groupData.oldToNewInputMap[targetId][targetSlot]);
+		}
+	}
+
+	static getGroupData(node) {
+		return node.constructor?.nodeData?.[GROUP];
+	}
+
+	static isGroupNode(node) {
+		return !!node.constructor?.nodeData?.[GROUP];
+	}
+
+	static async fromNodes(nodes) {
+		// Process the nodes into the stored workflow group node data
+		const builder = new GroupNodeBuilder(nodes);
+		const res = builder.build();
+		if (!res) return;
+
+		const { name, nodeData } = res;
+
+		// Convert this data into a LG node definition and register it
+		const config = new GroupNodeConfig(name, nodeData);
+		await config.registerType();
+
+		const groupNode = LiteGraph.createNode(`workflow/${name}`);
+		// Reuse the existing nodes for this instance
+		groupNode.setInnerNodes(builder.nodes);
+		groupNode[GROUP].populateWidgets();
+		app.graph.add(groupNode);
+
+		// Remove all converted nodes and relink them
+		groupNode[GROUP].replaceNodes(builder.nodes);
+		return groupNode;
+	}
+}
+
+function addConvertToGroupOptions() {
+	function addOption(options, index) {
+		const selected = Object.values(app.canvas.selected_nodes ?? {});
+		const disabled = selected.length < 2 || selected.find((n) => GroupNodeHandler.isGroupNode(n));
+		options.splice(index + 1, null, {
+			content: `Convert to Group Node`,
+			disabled,
+			callback: async () => {
+				return await GroupNodeHandler.fromNodes(selected);
+			},
+		});
+	}
+
+	// Add to canvas
+	const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions;
+	LGraphCanvas.prototype.getCanvasMenuOptions = function () {
+		const options = getCanvasMenuOptions.apply(this, arguments);
+		const index = options.findIndex((o) => o?.content === "Add Group") + 1 || options.length;
+		addOption(options, index);
+		return options;
+	};
+
+	// Add to nodes
+	const getNodeMenuOptions = LGraphCanvas.prototype.getNodeMenuOptions;
+	LGraphCanvas.prototype.getNodeMenuOptions = function (node) {
+		const options = getNodeMenuOptions.apply(this, arguments);
+		if (!GroupNodeHandler.isGroupNode(node)) {
+			const index = options.findIndex((o) => o?.content === "Outputs") + 1 || options.length - 1;
+			addOption(options, index);
+		}
+		return options;
+	};
+}
+
+const id = "Comfy.GroupNode";
+let globalDefs;
+const ext = {
+	name: id,
+	setup() {
+		addConvertToGroupOptions();
+	},
+	async beforeConfigureGraph(graphData, missingNodeTypes) {
+		const nodes = graphData?.extra?.groupNodes;
+		if (nodes) {
+			await GroupNodeConfig.registerFromWorkflow(nodes, missingNodeTypes);
+		}
+	},
+	addCustomNodeDefs(defs) {
+		// Store this so we can mutate it later with group nodes
+		globalDefs = defs;
+	},
+	nodeCreated(node) {
+		if (GroupNodeHandler.isGroupNode(node)) {
+			node[GROUP] = new GroupNodeHandler(node);
+		}
+	},
+};
+
+app.registerExtension(ext);
diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js
index b6479f454..2d4821742 100644
--- a/web/extensions/core/nodeTemplates.js
+++ b/web/extensions/core/nodeTemplates.js
@@ -1,5 +1,6 @@
 import { app } from "../../scripts/app.js";
 import { ComfyDialog, $el } from "../../scripts/ui.js";
+import { GroupNodeConfig, GroupNodeHandler } from "./groupNode.js";
 
 // Adds the ability to save and add multiple nodes as a template
 // To save:
@@ -34,7 +35,7 @@ class ManageTemplates extends ComfyDialog {
 			type: "file",
 			accept: ".json",
multiple: true, - style: {display: "none"}, + style: { display: "none" }, parent: document.body, onchange: () => this.importAll(), }); @@ -109,13 +110,13 @@ class ManageTemplates extends ComfyDialog { return; } - const json = JSON.stringify({templates: this.templates}, null, 2); // convert the data to a JSON string - const blob = new Blob([json], {type: "application/json"}); + const json = JSON.stringify({ templates: this.templates }, null, 2); // convert the data to a JSON string + const blob = new Blob([json], { type: "application/json" }); const url = URL.createObjectURL(blob); const a = $el("a", { href: url, download: "node_templates.json", - style: {display: "none"}, + style: { display: "none" }, parent: document.body, }); a.click(); @@ -291,11 +292,11 @@ app.registerExtension({ setup() { const manage = new ManageTemplates(); - const clipboardAction = (cb) => { + const clipboardAction = async (cb) => { // We use the clipboard functions but dont want to overwrite the current user clipboard // Restore it after we've run our callback const old = localStorage.getItem("litegrapheditor_clipboard"); - cb(); + await cb(); localStorage.setItem("litegrapheditor_clipboard", old); }; @@ -309,13 +310,31 @@ app.registerExtension({ disabled: !Object.keys(app.canvas.selected_nodes || {}).length, callback: () => { const name = prompt("Enter name"); - if (!name || !name.trim()) return; + if (!name?.trim()) return; clipboardAction(() => { app.canvas.copyToClipboard(); + let data = localStorage.getItem("litegrapheditor_clipboard"); + data = JSON.parse(data); + const nodeIds = Object.keys(app.canvas.selected_nodes); + for (let i = 0; i < nodeIds.length; i++) { + const node = app.graph.getNodeById(nodeIds[i]); + const nodeData = node?.constructor.nodeData; + + let groupData = GroupNodeHandler.getGroupData(node); + if (groupData) { + groupData = groupData.nodeData; + if (!data.groupNodes) { + data.groupNodes = {}; + } + data.groupNodes[nodeData.name] = groupData; + data.nodes[i].type = nodeData.name; + } + } + manage.templates.push({ name, - data: localStorage.getItem("litegrapheditor_clipboard"), + data: JSON.stringify(data), }); manage.store(); }); @@ -323,15 +342,19 @@ app.registerExtension({ }); // Map each template to a menu item - const subItems = manage.templates.map((t) => ({ - content: t.name, - callback: () => { - clipboardAction(() => { - localStorage.setItem("litegrapheditor_clipboard", t.data); - app.canvas.pasteFromClipboard(); - }); - }, - })); + const subItems = manage.templates.map((t) => { + return { + content: t.name, + callback: () => { + clipboardAction(async () => { + const data = JSON.parse(t.data); + await GroupNodeConfig.registerFromWorkflow(data.groupNodes, {}); + localStorage.setItem("litegrapheditor_clipboard", t.data); + app.canvas.pasteFromClipboard(); + }); + }, + }; + }); subItems.push(null, { content: "Manage", diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 5c8fbc9b2..b6fa411f7 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -121,6 +121,110 @@ function isValidCombo(combo, obj) { return true; } +export function mergeIfValid(output, config2, forceUpdate, recreateWidget, config1) { + if (!config1) { + config1 = output.widget[CONFIG] ?? 
output.widget[GET_CONFIG](); + } + + if (config1[0] instanceof Array) { + if (!isValidCombo(config1[0], config2[0])) return false; + } else if (config1[0] !== config2[0]) { + // Types dont match + console.log(`connection rejected: types dont match`, config1[0], config2[0]); + return false; + } + + const keys = new Set([...Object.keys(config1[1] ?? {}), ...Object.keys(config2[1] ?? {})]); + + let customConfig; + const getCustomConfig = () => { + if (!customConfig) { + if (typeof structuredClone === "undefined") { + customConfig = JSON.parse(JSON.stringify(config1[1] ?? {})); + } else { + customConfig = structuredClone(config1[1] ?? {}); + } + } + return customConfig; + }; + + const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; + for (const k of keys.values()) { + if (k !== "default" && k !== "forceInput" && k !== "defaultInput") { + let v1 = config1[1][k]; + let v2 = config2[1]?.[k]; + + if (v1 === v2 || (!v1 && !v2)) continue; + + if (isNumber) { + if (k === "min") { + const theirMax = config2[1]?.["max"]; + if (theirMax != null && v1 > theirMax) { + console.log("connection rejected: min > max", v1, theirMax); + return false; + } + getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.max(v1, v2); + continue; + } else if (k === "max") { + const theirMin = config2[1]?.["min"]; + if (theirMin != null && v1 < theirMin) { + console.log("connection rejected: max < min", v1, theirMin); + return false; + } + getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.min(v1, v2); + continue; + } else if (k === "step") { + let step; + if (v1 == null) { + // No current step + step = v2; + } else if (v2 == null) { + // No new step + step = v1; + } else { + if (v1 < v2) { + // Ensure v1 is larger for the mod + const a = v2; + v2 = v1; + v1 = a; + } + if (v1 % v2) { + console.log("connection rejected: steps not divisible", "current:", v1, "new:", v2); + return false; + } + + step = v1; + } + + getCustomConfig()[k] = step; + continue; + } + } + + console.log(`connection rejected: config ${k} values dont match`, v1, v2); + return false; + } + } + + if (customConfig || forceUpdate) { + if (customConfig) { + output.widget[CONFIG] = [config1[0], customConfig]; + } + + const widget = recreateWidget?.call(this); + // When deleting a node this can be null + if (widget) { + const min = widget.options.min; + const max = widget.options.max; + if (min != null && widget.value < min) widget.value = min; + if (max != null && widget.value > max) widget.value = max; + widget.callback(widget.value); + } + } + + return { customConfig }; +} + app.registerExtension({ name: "Comfy.WidgetInputs", async beforeRegisterNodeDef(nodeType, nodeData, app) { @@ -308,7 +412,7 @@ app.registerExtension({ this.isVirtualNode = true; } - applyToGraph() { + applyToGraph(extraLinks = []) { if (!this.outputs[0].links?.length) return; function get_links(node) { @@ -325,10 +429,9 @@ app.registerExtension({ return links; } - let links = get_links(this); + let links = [...get_links(this).map((l) => app.graph.links[l]), ...extraLinks]; // For each output link copy our value over the original widget value - for (const l of links) { - const linkInfo = app.graph.links[l]; + for (const linkInfo of links) { const node = this.graph.getNodeById(linkInfo.target_id); const input = node.inputs[linkInfo.target_slot]; const widgetName = input.widget.name; @@ -405,7 +508,12 @@ app.registerExtension({ } if (this.outputs[slot].links?.length) { - return this.#isValidConnection(input); + const valid = this.#isValidConnection(input); + 
if (valid) { + // On connect of additional outputs, copy our value to their widget + this.applyToGraph([{ target_id: target_node.id, target_slot }]); + } + return valid; } } @@ -462,12 +570,12 @@ app.registerExtension({ } } - if (widget.type === "number" || widget.type === "combo") { + if (!inputData?.[1]?.control_after_generate && (widget.type === "number" || widget.type === "combo")) { let control_value = this.widgets_values?.[1]; if (!control_value) { control_value = "fixed"; } - addValueControlWidgets(this, widget, control_value); + addValueControlWidgets(this, widget, control_value, undefined, inputData); let filter = this.widgets_values?.[2]; if(filter && this.widgets.length === 3) { this.widgets[2].value = filter; @@ -507,6 +615,7 @@ app.registerExtension({ this.#removeWidgets(); this.#onFirstConnection(true); for (let i = 0; i < this.widgets?.length; i++) this.widgets[i].value = values[i]; + return this.widgets[0]; } #mergeWidgetConfig() { @@ -547,108 +656,8 @@ app.registerExtension({ #isValidConnection(input, forceUpdate) { // Only allow connections where the configs match const output = this.outputs[0]; - const config1 = output.widget[CONFIG] ?? output.widget[GET_CONFIG](); const config2 = input.widget[GET_CONFIG](); - - if (config1[0] instanceof Array) { - if (!isValidCombo(config1[0], config2[0])) return false; - } else if (config1[0] !== config2[0]) { - // Types dont match - console.log(`connection rejected: types dont match`, config1[0], config2[0]); - return false; - } - - const keys = new Set([...Object.keys(config1[1] ?? {}), ...Object.keys(config2[1] ?? {})]); - - let customConfig; - const getCustomConfig = () => { - if (!customConfig) { - if (typeof structuredClone === "undefined") { - customConfig = JSON.parse(JSON.stringify(config1[1] ?? {})); - } else { - customConfig = structuredClone(config1[1] ?? {}); - } - } - return customConfig; - }; - - const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; - for (const k of keys.values()) { - if (k !== "default" && k !== "forceInput" && k !== "defaultInput") { - let v1 = config1[1][k]; - let v2 = config2[1][k]; - - if (v1 === v2 || (!v1 && !v2)) continue; - - if (isNumber) { - if (k === "min") { - const theirMax = config2[1]["max"]; - if (theirMax != null && v1 > theirMax) { - console.log("connection rejected: min > max", v1, theirMax); - return false; - } - getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.max(v1, v2); - continue; - } else if (k === "max") { - const theirMin = config2[1]["min"]; - if (theirMin != null && v1 < theirMin) { - console.log("connection rejected: max < min", v1, theirMin); - return false; - } - getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? 
v1 : Math.min(v1, v2); - continue; - } else if (k === "step") { - let step; - if (v1 == null) { - // No current step - step = v2; - } else if (v2 == null) { - // No new step - step = v1; - } else { - if (v1 < v2) { - // Ensure v1 is larger for the mod - const a = v2; - v2 = v1; - v1 = a; - } - if (v1 % v2) { - console.log("connection rejected: steps not divisible", "current:", v1, "new:", v2); - return false; - } - - step = v1; - } - - getCustomConfig()[k] = step; - continue; - } - } - - console.log(`connection rejected: config ${k} values dont match`, v1, v2); - return false; - } - } - - if (customConfig || forceUpdate) { - if (customConfig) { - output.widget[CONFIG] = [config1[0], customConfig]; - } - - this.#recreateWidget(); - - const widget = this.widgets[0]; - // When deleting a node this can be null - if (widget) { - const min = widget.options.min; - const max = widget.options.max; - if (min != null && widget.value < min) widget.value = min; - if (max != null && widget.value > max) widget.value = max; - widget.callback(widget.value); - } - } - - return true; + return !!mergeIfValid.call(this, output, config2, forceUpdate, this.#recreateWidget); } #removeWidgets() { diff --git a/web/scripts/app.js b/web/scripts/app.js index cd20c40fd..e9cfb277d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1,5 +1,5 @@ import { ComfyLogging } from "./logging.js"; -import { ComfyWidgets } from "./widgets.js"; +import { ComfyWidgets, getWidgetType } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; @@ -779,7 +779,7 @@ export class ComfyApp { * Adds a handler on paste that extracts and loads images or workflows from pasted JSON data */ #addPasteHandler() { - document.addEventListener("paste", (e) => { + document.addEventListener("paste", async (e) => { // ctrl+shift+v is used to paste nodes with connections // this is handled by litegraph if(this.shiftDown) return; @@ -827,7 +827,7 @@ export class ComfyApp { } if (workflow && workflow.version && workflow.nodes && workflow.extra) { - this.loadGraphData(workflow); + await this.loadGraphData(workflow); } else { if (e.target.type === "text" || e.target.type === "textarea") { @@ -1177,7 +1177,19 @@ export class ComfyApp { }); api.addEventListener("executed", ({ detail }) => { - this.nodeOutputs[detail.node] = detail.output; + const output = this.nodeOutputs[detail.node]; + if (detail.merge && output) { + for (const k in detail.output ?? 
{}) { + const v = output[k]; + if (v instanceof Array) { + output[k] = v.concat(detail.output[k]); + } else { + output[k] = detail.output[k]; + } + } + } else { + this.nodeOutputs[detail.node] = detail.output; + } const node = this.graph.getNodeById(detail.node); if (node) { if (node.onExecuted) @@ -1292,6 +1304,7 @@ export class ComfyApp { this.#addProcessMouseHandler(); this.#addProcessKeyHandler(); this.#addConfigureHandler(); + this.#addApiUpdateHandlers(); this.graph = new LGraph(); @@ -1328,7 +1341,7 @@ export class ComfyApp { const json = localStorage.getItem("workflow"); if (json) { const workflow = JSON.parse(json); - this.loadGraphData(workflow); + await this.loadGraphData(workflow); restored = true; } } catch (err) { @@ -1337,7 +1350,7 @@ export class ComfyApp { // We failed to restore a workflow so load the default if (!restored) { - this.loadGraphData(); + await this.loadGraphData(); } // Save current workflow automatically @@ -1345,7 +1358,6 @@ export class ComfyApp { this.#addDrawNodeHandler(); this.#addDrawGroupsHandler(); - this.#addApiUpdateHandlers(); this.#addDropHandler(); this.#addCopyHandler(); this.#addPasteHandler(); @@ -1365,11 +1377,81 @@ export class ComfyApp { await this.#invokeExtensionsAsync("registerCustomNodes"); } + async registerNodeDef(nodeId, nodeData) { + const self = this; + const node = Object.assign( + function ComfyNode() { + var inputs = nodeData["input"]["required"]; + if (nodeData["input"]["optional"] != undefined) { + inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"]); + } + const config = { minWidth: 1, minHeight: 1 }; + for (const inputName in inputs) { + const inputData = inputs[inputName]; + const type = inputData[0]; + + let widgetCreated = true; + const widgetType = getWidgetType(inputData, inputName); + if(widgetType) { + if(widgetType === "COMBO") { + Object.assign(config, self.widgets.COMBO(this, inputName, inputData, app) || {}); + } else { + Object.assign(config, self.widgets[widgetType](this, inputName, inputData, app) || {}); + } + } else { + // Node connection inputs + this.addInput(inputName, type); + widgetCreated = false; + } + + if(widgetCreated && inputData[1]?.forceInput && config?.widget) { + if (!config.widget.options) config.widget.options = {}; + config.widget.options.forceInput = inputData[1].forceInput; + } + if(widgetCreated && inputData[1]?.defaultInput && config?.widget) { + if (!config.widget.options) config.widget.options = {}; + config.widget.options.defaultInput = inputData[1].defaultInput; + } + } + + for (const o in nodeData["output"]) { + let output = nodeData["output"][o]; + if(output instanceof Array) output = "COMBO"; + const outputName = nodeData["output_name"][o] || output; + const outputShape = nodeData["output_is_list"][o] ? 
LiteGraph.GRID_SHAPE : LiteGraph.CIRCLE_SHAPE ; + this.addOutput(outputName, output, { shape: outputShape }); + } + + const s = this.computeSize(); + s[0] = Math.max(config.minWidth, s[0] * 1.5); + s[1] = Math.max(config.minHeight, s[1]); + this.size = s; + this.serialize_widgets = true; + + app.#invokeExtensionsAsync("nodeCreated", this); + }, + { + title: nodeData.display_name || nodeData.name, + comfyClass: nodeData.name, + nodeData + } + ); + node.prototype.comfyClass = nodeData.name; + + this.#addNodeContextMenuHandler(node); + this.#addDrawBackgroundHandler(node, app); + this.#addNodeKeyHandler(node); + + await this.#invokeExtensionsAsync("beforeRegisterNodeDef", node, nodeData); + LiteGraph.registerNodeType(nodeId, node); + node.category = nodeData.category; + } + async registerNodesFromDefs(defs) { await this.#invokeExtensionsAsync("addCustomNodeDefs", defs); // Generate list of known widgets - const widgets = Object.assign( + this.widgets = Object.assign( {}, ComfyWidgets, ...(await this.#invokeExtensionsAsync("getCustomWidgets")).filter(Boolean) @@ -1377,75 +1459,7 @@ export class ComfyApp { // Register a node for each definition for (const nodeId in defs) { - const nodeData = defs[nodeId]; - const node = Object.assign( - function ComfyNode() { - var inputs = nodeData["input"]["required"]; - if (nodeData["input"]["optional"] != undefined){ - inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"]) - } - const config = { minWidth: 1, minHeight: 1 }; - for (const inputName in inputs) { - const inputData = inputs[inputName]; - const type = inputData[0]; - - let widgetCreated = true; - if (Array.isArray(type)) { - // Enums - Object.assign(config, widgets.COMBO(this, inputName, inputData, app) || {}); - } else if (`${type}:${inputName}` in widgets) { - // Support custom widgets by Type:Name - Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {}); - } else if (type in widgets) { - // Standard type widgets - Object.assign(config, widgets[type](this, inputName, inputData, app) || {}); - } else { - // Node connection inputs - this.addInput(inputName, type); - widgetCreated = false; - } - - if(widgetCreated && inputData[1]?.forceInput && config?.widget) { - if (!config.widget.options) config.widget.options = {}; - config.widget.options.forceInput = inputData[1].forceInput; - } - if(widgetCreated && inputData[1]?.defaultInput && config?.widget) { - if (!config.widget.options) config.widget.options = {}; - config.widget.options.defaultInput = inputData[1].defaultInput; - } - } - - for (const o in nodeData["output"]) { - let output = nodeData["output"][o]; - if(output instanceof Array) output = "COMBO"; - const outputName = nodeData["output_name"][o] || output; - const outputShape = nodeData["output_is_list"][o] ? 
LiteGraph.GRID_SHAPE : LiteGraph.CIRCLE_SHAPE ; - this.addOutput(outputName, output, { shape: outputShape }); - } - - const s = this.computeSize(); - s[0] = Math.max(config.minWidth, s[0] * 1.5); - s[1] = Math.max(config.minHeight, s[1]); - this.size = s; - this.serialize_widgets = true; - - app.#invokeExtensionsAsync("nodeCreated", this); - }, - { - title: nodeData.display_name || nodeData.name, - comfyClass: nodeData.name, - nodeData - } - ); - node.prototype.comfyClass = nodeData.name; - - this.#addNodeContextMenuHandler(node); - this.#addDrawBackgroundHandler(node, app); - this.#addNodeKeyHandler(node); - - await this.#invokeExtensionsAsync("beforeRegisterNodeDef", node, nodeData); - LiteGraph.registerNodeType(nodeId, node); - node.category = nodeData.category; + this.registerNodeDef(nodeId, defs[nodeId]); } } @@ -1488,9 +1502,14 @@ export class ComfyApp { showMissingNodesError(missingNodeTypes, hasAddedNodes = true) { this.ui.dialog.show( - `When loading the graph, the following node types were not found:
<ul>${Array.from(new Set(missingNodeTypes)).map( - (t) => `<li>${t}</li>` - ).join("")}</ul>
${hasAddedNodes ? "Nodes that have failed to load will show as red on the graph." : ""}` + $el("div", [ + $el("span", { textContent: "When loading the graph, the following node types were not found: " }), + $el( + "ul", + Array.from(new Set(missingNodeTypes)).map((t) => $el("li", { textContent: t })) + ), + ...(hasAddedNodes ? [$el("span", { textContent: "Nodes that have failed to load will show as red on the graph." })] : []), + ]) ); this.logging.addEntry("Comfy.App", "warn", { MissingNodes: missingNodeTypes, @@ -1501,7 +1520,7 @@ export class ComfyApp { * Populates the graph with the specified workflow data * @param {*} graphData A serialized graph object */ - loadGraphData(graphData) { + async loadGraphData(graphData) { this.clean(); let reset_invalid_values = false; @@ -1519,6 +1538,7 @@ export class ComfyApp { } const missingNodeTypes = []; + await this.#invokeExtensionsAsync("beforeConfigureGraph", graphData, missingNodeTypes); for (let n of graphData.nodes) { // Patch T2IAdapterLoader to ControlNetLoader since they are the same node now if (n.type == "T2IAdapterLoader") n.type = "ControlNetLoader"; @@ -1527,8 +1547,8 @@ export class ComfyApp { // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { - n.type = sanitizeNodeName(n.type); missingNodeTypes.push(n.type); + n.type = sanitizeNodeName(n.type); } } @@ -1627,92 +1647,98 @@ export class ComfyApp { * @returns The workflow and node links */ async graphToPrompt() { - for (const node of this.graph.computeExecutionOrder(false)) { - if (node.isVirtualNode) { - // Don't serialize frontend only nodes but let them make changes - if (node.applyToGraph) { - node.applyToGraph(); + for (const outerNode of this.graph.computeExecutionOrder(false)) { + const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode]; + for (const node of innerNodes) { + if (node.isVirtualNode) { + // Don't serialize frontend only nodes but let them make changes + if (node.applyToGraph) { + node.applyToGraph(); + } } - continue; } } const workflow = this.graph.serialize(); const output = {}; // Process nodes in order of execution - for (const node of this.graph.computeExecutionOrder(false)) { - const n = workflow.nodes.find((n) => n.id === node.id); + for (const outerNode of this.graph.computeExecutionOrder(false)) { + const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode]; + for (const node of innerNodes) { + if (node.isVirtualNode) { + continue; + } - if (node.isVirtualNode) { - continue; - } + if (node.mode === 2 || node.mode === 4) { + // Don't serialize muted nodes + continue; + } - if (node.mode === 2 || node.mode === 4) { - // Don't serialize muted nodes - continue; - } + const inputs = {}; + const widgets = node.widgets; - const inputs = {}; - const widgets = node.widgets; - - // Store all widget values - if (widgets) { - for (const i in widgets) { - const widget = widgets[i]; - if (!widget.options || widget.options.serialize !== false) { - inputs[widget.name] = widget.serializeValue ? await widget.serializeValue(n, i) : widget.value; + // Store all widget values + if (widgets) { + for (const i in widgets) { + const widget = widgets[i]; + if (!widget.options || widget.options.serialize !== false) { + inputs[widget.name] = widget.serializeValue ? 
await widget.serializeValue(node, i) : widget.value; + } } } - } - // Store all node links - for (let i in node.inputs) { - let parent = node.getInputNode(i); - if (parent) { - let link = node.getInputLink(i); - while (parent.mode === 4 || parent.isVirtualNode) { - let found = false; - if (parent.isVirtualNode) { - link = parent.getInputLink(link.origin_slot); - if (link) { - parent = parent.getInputNode(link.target_slot); - if (parent) { - found = true; - } - } - } else if (link && parent.mode === 4) { - let all_inputs = [link.origin_slot]; - if (parent.inputs) { - all_inputs = all_inputs.concat(Object.keys(parent.inputs)) - for (let parent_input in all_inputs) { - parent_input = all_inputs[parent_input]; - if (parent.inputs[parent_input]?.type === node.inputs[i].type) { - link = parent.getInputLink(parent_input); - if (link) { - parent = parent.getInputNode(parent_input); - } + // Store all node links + for (let i in node.inputs) { + let parent = node.getInputNode(i); + if (parent) { + let link = node.getInputLink(i); + while (parent.mode === 4 || parent.isVirtualNode) { + let found = false; + if (parent.isVirtualNode) { + link = parent.getInputLink(link.origin_slot); + if (link) { + parent = parent.getInputNode(link.target_slot); + if (parent) { found = true; - break; + } + } + } else if (link && parent.mode === 4) { + let all_inputs = [link.origin_slot]; + if (parent.inputs) { + all_inputs = all_inputs.concat(Object.keys(parent.inputs)) + for (let parent_input in all_inputs) { + parent_input = all_inputs[parent_input]; + if (parent.inputs[parent_input]?.type === node.inputs[i].type) { + link = parent.getInputLink(parent_input); + if (link) { + parent = parent.getInputNode(parent_input); + } + found = true; + break; + } } } } + + if (!found) { + break; + } } - if (!found) { - break; + if (link) { + if (parent?.updateLink) { + link = parent.updateLink(link); + } + inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)]; } } - - if (link) { - inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)]; - } } - } - output[String(node.id)] = { - inputs, - class_type: node.comfyClass, - }; + output[String(node.id)] = { + inputs, + class_type: node.comfyClass, + }; + } } // Remove inputs connected to removed nodes @@ -1832,7 +1858,7 @@ export class ComfyApp { const pngInfo = await getPngMetadata(file); if (pngInfo) { if (pngInfo.workflow) { - this.loadGraphData(JSON.parse(pngInfo.workflow)); + await this.loadGraphData(JSON.parse(pngInfo.workflow)); } else if (pngInfo.parameters) { importA1111(this.graph, pngInfo.parameters); } @@ -1848,21 +1874,21 @@ export class ComfyApp { } } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); - reader.onload = () => { + reader.onload = async () => { const jsonContent = JSON.parse(reader.result); if (jsonContent?.templates) { this.loadTemplateData(jsonContent); } else if(this.isApiJson(jsonContent)) { this.loadApiJson(jsonContent); } else { - this.loadGraphData(jsonContent); + await this.loadGraphData(jsonContent); } }; reader.readAsText(file); } else if (file.name?.endsWith(".latent") || file.name?.endsWith(".safetensors")) { const info = await getLatentMetadata(file); if (info.workflow) { - this.loadGraphData(JSON.parse(info.workflow)); + await this.loadGraphData(JSON.parse(info.workflow)); } } } diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 07da591cb..37d26f3c5 100644 --- a/web/scripts/domWidget.js +++ 
b/web/scripts/domWidget.js @@ -44,7 +44,7 @@ function getClipPath(node, element, elRect) { } function computeSize(size) { - if (this.widgets?.[0].last_y == null) return; + if (this.widgets?.[0]?.last_y == null) return; let y = this.widgets[0].last_y; let freeSpace = size[1] - y; @@ -195,7 +195,6 @@ export function addDomClippingSetting() { type: "boolean", defaultValue: enableDomClipping, onChange(value) { - console.log("enableDomClipping", enableDomClipping); enableDomClipping = !!value; }, }); diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 8a58d30b3..ebaf86fe4 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -462,8 +462,8 @@ class ComfyList { return $el("div", {textContent: item.prompt[0] + ": "}, [ $el("button", { textContent: "Load", - onclick: () => { - app.loadGraphData(item.prompt[3].extra_pnginfo.workflow); + onclick: async () => { + await app.loadGraphData(item.prompt[3].extra_pnginfo.workflow); if (item.outputs) { app.nodeOutputs = item.outputs; } @@ -784,9 +784,9 @@ export class ComfyUI { } }), $el("button", { - id: "comfy-load-default-button", textContent: "Load Default", onclick: () => { + id: "comfy-load-default-button", textContent: "Load Default", onclick: async () => { if (!confirmClear.value || confirm("Load default workflow?")) { - app.loadGraphData() + await app.loadGraphData() } } }), diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index fbc1d0fc3..de5877e54 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -23,29 +23,73 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } }; } -export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) { - const widgets = addValueControlWidgets(node, targetWidget, defaultValue, values, { +export function getWidgetType(inputData, inputName) { + const type = inputData[0]; + + if (Array.isArray(type)) { + return "COMBO"; + } else if (`${type}:${inputName}` in ComfyWidgets) { + return `${type}:${inputName}`; + } else if (type in ComfyWidgets) { + return type; + } else { + return null; + } +} + +export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values, widgetName, inputData) { + let name = inputData[1]?.control_after_generate; + if(typeof name !== "string") { + name = widgetName; + } + const widgets = addValueControlWidgets(node, targetWidget, defaultValue, { addFilterList: false, - }); + controlAfterGenerateName: name + }, inputData); return widgets[0]; } -export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", values, options) { +export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", options, inputData) { + if (!defaultValue) defaultValue = "randomize"; if (!options) options = {}; - + + const getName = (defaultName, optionName) => { + let name = defaultName; + if (options[optionName]) { + name = options[optionName]; + } else if (typeof inputData?.[1]?.[defaultName] === "string") { + name = inputData?.[1]?.[defaultName]; + } else if (inputData?.[1]?.control_prefix) { + name = inputData?.[1]?.control_prefix + " " + name + } + return name; + } + const widgets = []; - const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { - values: ["fixed", "increment", "decrement", "randomize"], - serialize: false, // Don't include this in prompt. 
- }); + const valueControl = node.addWidget( + "combo", + getName("control_after_generate", "controlAfterGenerateName"), + defaultValue, + function () {}, + { + values: ["fixed", "increment", "decrement", "randomize"], + serialize: false, // Don't include this in prompt. + } + ); widgets.push(valueControl); const isCombo = targetWidget.type === "combo"; let comboFilter; if (isCombo && options.addFilterList !== false) { - comboFilter = node.addWidget("string", "control_filter_list", "", function (v) {}, { - serialize: false, // Don't include this in prompt. - }); + comboFilter = node.addWidget( + "string", + getName("control_filter_list", "controlFilterListName"), + "", + function () {}, + { + serialize: false, // Don't include this in prompt. + } + ); widgets.push(comboFilter); } @@ -96,7 +140,8 @@ export function addValueControlWidgets(node, targetWidget, defaultValue = "rando targetWidget.value = value; targetWidget.callback(value); } - } else { //number + } else { + //number let min = targetWidget.options.min; let max = targetWidget.options.max; // limit to something that javascript can handle @@ -119,32 +164,54 @@ export function addValueControlWidgets(node, targetWidget, defaultValue = "rando default: break; } - /*check if values are over or under their respective - * ranges and set them to min or max.*/ - if (targetWidget.value < min) - targetWidget.value = min; + /*check if values are over or under their respective + * ranges and set them to min or max.*/ + if (targetWidget.value < min) targetWidget.value = min; if (targetWidget.value > max) targetWidget.value = max; targetWidget.callback(targetWidget.value); } - } - + }; return widgets; }; -function seedWidget(node, inputName, inputData, app) { - const seed = ComfyWidgets.INT(node, inputName, inputData, app); - const seedControl = addValueControlWidget(node, seed.widget, "randomize"); +function seedWidget(node, inputName, inputData, app, widgetName) { + const seed = createIntWidget(node, inputName, inputData, app, true); + const seedControl = addValueControlWidget(node, seed.widget, "randomize", undefined, widgetName, inputData); seed.widget.linkedWidgets = [seedControl]; return seed; } + +function createIntWidget(node, inputName, inputData, app, isSeedInput) { + const control = inputData[1]?.control_after_generate; + if (!isSeedInput && control) { + return seedWidget(node, inputName, inputData, app, typeof control === "string" ? 
control : undefined); + } + + let widgetType = isSlider(inputData[1]["display"], app); + const { val, config } = getNumberDefaults(inputData, 1, 0, true); + Object.assign(config, { precision: 0 }); + return { + widget: node.addWidget( + widgetType, + inputName, + val, + function (v) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + }, + config + ), + }; +} + function addMultilineWidget(node, name, opts, app) { const inputEl = document.createElement("textarea"); inputEl.className = "comfy-multiline-input"; inputEl.value = opts.defaultVal; - inputEl.placeholder = opts.placeholder || ""; + inputEl.placeholder = opts.placeholder || name; const widget = node.addDOMWidget(name, "customtext", inputEl, { getValue() { @@ -156,6 +223,10 @@ function addMultilineWidget(node, name, opts, app) { }); widget.inputEl = inputEl; + inputEl.addEventListener("input", () => { + widget.callback?.(widget.value); + }); + return { minWidth: 400, minHeight: 200, widget }; } @@ -186,21 +257,7 @@ export const ComfyWidgets = { }, config) }; }, INT(node, inputName, inputData, app) { - let widgetType = isSlider(inputData[1]["display"], app); - const { val, config } = getNumberDefaults(inputData, 1, 0, true); - Object.assign(config, { precision: 0 }); - return { - widget: node.addWidget( - widgetType, - inputName, - val, - function (v) { - const s = this.options.step / 10; - this.value = Math.round(v / s) * s; - }, - config - ), - }; + return createIntWidget(node, inputName, inputData, app); }, BOOLEAN(node, inputName, inputData) { let defaultVal = false; @@ -245,10 +302,14 @@ export const ComfyWidgets = { if (inputData[1] && inputData[1].default) { defaultValue = inputData[1].default; } - return { widget: node.addWidget("combo", inputName, defaultValue, () => {}, { values: type }) }; + const res = { widget: node.addWidget("combo", inputName, defaultValue, () => {}, { values: type }) }; + if (inputData[1]?.control_after_generate) { + res.widget.linkedWidgets = addValueControlWidgets(node, res.widget, undefined, undefined, inputData); + } + return res; }, IMAGEUPLOAD(node, inputName, inputData, app) { - const imageWidget = node.widgets.find((w) => w.name === "image"); + const imageWidget = node.widgets.find((w) => w.name === (inputData[1]?.widget ?? "image")); let uploadWidget; function showImage(name) { @@ -362,9 +423,10 @@ export const ComfyWidgets = { document.body.append(fileInput); // Create the button widget for selecting the files - uploadWidget = node.addWidget("button", "choose file to upload", "image", () => { + uploadWidget = node.addWidget("button", inputName, "image", () => { fileInput.click(); }); + uploadWidget.label = "choose file to upload"; uploadWidget.serialize = false; // Add handler to check if an image is being dragged over our node From 6b769bca01bf7de989ab4aaafd8db41a92a87094 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 30 Nov 2023 15:22:32 -0500 Subject: [PATCH 159/170] Do a garbage collect after the interval even if nothing is running. 
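The point of the change below: `PromptQueue.get` grows an optional timeout, and `prompt_worker` only arms that timeout while a collect is pending, so an idle queue still wakes the worker once the interval elapses. A minimal sketch of the control flow, using a stdlib `queue.Queue` and a placeholder `execute` in place of the real `PromptQueue`/`PromptExecutor` (both stand-ins are illustrative; note `queue.Queue.get` raises `queue.Empty` on timeout where `PromptQueue.get` returns `None`):

```python
import gc
import queue
import time

def prompt_worker(q, gc_collect_interval=10.0, execute=print):
    last_gc_collect = time.perf_counter()
    need_gc = False
    while True:
        # Arm a timeout only while a collect is pending; otherwise block forever.
        timeout = None
        if need_gc:
            timeout = max(gc_collect_interval - (time.perf_counter() - last_gc_collect), 0.0)
        try:
            item = q.get(timeout=timeout)
        except queue.Empty:
            item = None  # timed out with nothing queued
        if item is not None:
            execute(item)
            need_gc = True  # something ran, so schedule a collect
        if need_gc and (time.perf_counter() - last_gc_collect) > gc_collect_interval:
            gc.collect()
            last_gc_collect = time.perf_counter()
            need_gc = False
```

A busy queue never pays the timeout, and an idle one wakes at most once per interval to run the collect.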
--- execution.py | 6 ++++-- main.py | 45 +++++++++++++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/execution.py b/execution.py index bca48a785..7db1f095b 100644 --- a/execution.py +++ b/execution.py @@ -700,10 +700,12 @@ class PromptQueue: self.server.queue_updated() self.not_empty.notify() - def get(self): + def get(self, timeout=None): with self.not_empty: while len(self.queue) == 0: - self.not_empty.wait() + self.not_empty.wait(timeout=timeout) + if timeout is not None and len(self.queue) == 0: + return None item = heapq.heappop(self.queue) i = self.task_counter self.currently_running[i] = copy.deepcopy(item) diff --git a/main.py b/main.py index 3997fbefc..1f9c5f443 100644 --- a/main.py +++ b/main.py @@ -89,23 +89,36 @@ def cuda_malloc_warning(): def prompt_worker(q, server): e = execution.PromptExecutor(server) last_gc_collect = 0 - while True: - item, item_id = q.get() - execution_start_time = time.perf_counter() - prompt_id = item[1] - e.execute(item[2], prompt_id, item[3], item[4]) - q.task_done(item_id, e.outputs_ui) - if server.client_id is not None: - server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) + need_gc = False + gc_collect_interval = 10.0 - current_time = time.perf_counter() - execution_time = current_time - execution_start_time - print("Prompt executed in {:.2f} seconds".format(execution_time)) - if (current_time - last_gc_collect) > 10.0: - gc.collect() - comfy.model_management.soft_empty_cache() - last_gc_collect = current_time - print("gc collect") + while True: + timeout = None + if need_gc: + timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0) + + queue_item = q.get(timeout=timeout) + if queue_item is not None: + item, item_id = queue_item + execution_start_time = time.perf_counter() + prompt_id = item[1] + e.execute(item[2], prompt_id, item[3], item[4]) + need_gc = True + q.task_done(item_id, e.outputs_ui) + if server.client_id is not None: + server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) + + current_time = time.perf_counter() + execution_time = current_time - execution_start_time + print("Prompt executed in {:.2f} seconds".format(execution_time)) + + if need_gc: + current_time = time.perf_counter() + if (current_time - last_gc_collect) > gc_collect_interval: + gc.collect() + comfy.model_management.soft_empty_cache() + last_gc_collect = current_time + need_gc = False async def run(server, address='', port=8188, verbose=True, call_on_start=None): await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop()) From c97be4db91d4a249c19afdf88fa1cf3268544e45 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 30 Nov 2023 19:27:03 -0500 Subject: [PATCH 160/170] Support SD2.1 turbo checkpoint. --- comfy/supported_models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 7e2ac677d..455323b96 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -71,6 +71,10 @@ class SD20(supported_models_base.BASE): return model_base.ModelType.EPS def process_clip_state_dict(self, state_dict): + replace_prefix = {} + replace_prefix["conditioner.embedders.0.model."] = "cond_stage_model.model." 
#SD2 in sgm format + state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) + state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.clip_h.transformer.text_model.", 24) return state_dict From 5d5c320054758413be00e98b26a28b39ee8f2acd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 1 Dec 2023 02:03:34 -0500 Subject: [PATCH 161/170] Fix right click not working for some users. --- web/extensions/core/groupNode.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 450b4f5f3..397c4c713 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -1010,7 +1010,7 @@ function addConvertToGroupOptions() { const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions; LGraphCanvas.prototype.getCanvasMenuOptions = function () { const options = getCanvasMenuOptions.apply(this, arguments); - const index = options.findIndex((o) => o?.content === "Add Group") + 1 || opts.length; + const index = options.findIndex((o) => o?.content === "Add Group") + 1 || options.length; addOption(options, index); return options; }; @@ -1020,7 +1020,7 @@ function addConvertToGroupOptions() { LGraphCanvas.prototype.getNodeMenuOptions = function (node) { const options = getNodeMenuOptions.apply(this, arguments); if (!GroupNodeHandler.isGroupNode(node)) { - const index = options.findIndex((o) => o?.content === "Outputs") + 1 || opts.length - 1; + const index = options.findIndex((o) => o?.content === "Outputs") + 1 || options.length - 1; addOption(options, index); } return options; From ec7a00aa9644049c306dd0a2c02cb4f91f127286 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 1 Dec 2023 04:13:04 -0500 Subject: [PATCH 162/170] Fix extension widgets not working. 
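The cause: the `getWidgetType` helper exported from widgets.js resolved only against the static `ComfyWidgets` map, so widget types contributed by extensions through `getCustomWidgets` were never found. The diff below moves the lookup onto the app, where it resolves against the merged `this.widgets` map. The resolution order, sketched here in Python for illustration (the real implementation is the JavaScript in the diff):

```python
def get_widget_type(widgets, input_data, input_name):
    # `widgets` must be the merged map (built-ins plus extension widgets);
    # resolving against the built-ins alone was the bug this commit fixes.
    input_type = input_data[0]
    if isinstance(input_type, list):
        return "COMBO"  # enum inputs always render as a combo box
    if f"{input_type}:{input_name}" in widgets:
        return f"{input_type}:{input_name}"  # per-input override, e.g. "INT:seed"
    if input_type in widgets:
        return input_type  # standard widget for this input type
    return None  # no widget: the input becomes a node connection slot

# An extension-provided widget type now resolves like a built-in one:
merged = {"INT": object(), "INT:seed": object(), "CUSTOMWIDGET": object()}
assert get_widget_type(merged, ["CUSTOMWIDGET", {}], "test_input") == "CUSTOMWIDGET"
assert get_widget_type(merged, ["INT", {}], "seed") == "INT:seed"
```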
--- web/extensions/core/groupNode.js | 3 +-- web/scripts/app.js | 18 ++++++++++++++++-- web/scripts/widgets.js | 14 -------------- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 397c4c713..4b4bf74fa 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -1,6 +1,5 @@ import { app } from "../../scripts/app.js"; import { api } from "../../scripts/api.js"; -import { getWidgetType } from "../../scripts/widgets.js"; import { mergeIfValid } from "./widgetInputs.js"; const GROUP = Symbol(); @@ -332,7 +331,7 @@ export class GroupNodeConfig { const converted = new Map(); const widgetMap = (this.oldToNewWidgetMap[node.index] = {}); for (const inputName of inputNames) { - let widgetType = getWidgetType(inputs[inputName], inputName); + let widgetType = app.getWidgetType(inputs[inputName], inputName); if (widgetType) { const convertedIndex = node.inputs?.findIndex( (inp) => inp.name === inputName && inp.widget?.name === inputName diff --git a/web/scripts/app.js b/web/scripts/app.js index e9cfb277d..a72e30027 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1,5 +1,5 @@ import { ComfyLogging } from "./logging.js"; -import { ComfyWidgets, getWidgetType } from "./widgets.js"; +import { ComfyWidgets } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; @@ -1377,6 +1377,20 @@ export class ComfyApp { await this.#invokeExtensionsAsync("registerCustomNodes"); } + getWidgetType(inputData, inputName) { + const type = inputData[0]; + + if (Array.isArray(type)) { + return "COMBO"; + } else if (`${type}:${inputName}` in this.widgets) { + return `${type}:${inputName}`; + } else if (type in this.widgets) { + return type; + } else { + return null; + } + } + async registerNodeDef(nodeId, nodeData) { const self = this; const node = Object.assign( @@ -1391,7 +1405,7 @@ export class ComfyApp { const type = inputData[0]; let widgetCreated = true; - const widgetType = getWidgetType(inputData, inputName); + const widgetType = self.getWidgetType(inputData, inputName); if(widgetType) { if(widgetType === "COMBO") { Object.assign(config, self.widgets.COMBO(this, inputName, inputData, app) || {}); diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index de5877e54..d599b85ba 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -23,20 +23,6 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } }; } -export function getWidgetType(inputData, inputName) { - const type = inputData[0]; - - if (Array.isArray(type)) { - return "COMBO"; - } else if (`${type}:${inputName}` in ComfyWidgets) { - return `${type}:${inputName}`; - } else if (type in ComfyWidgets) { - return type; - } else { - return null; - } -} - export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values, widgetName, inputData) { let name = inputData[1]?.control_after_generate; if(typeof name !== "string") { From 8491280504d69f38d1bc72568f8f745c5dc41d74 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 1 Dec 2023 22:24:20 +0000 Subject: [PATCH 163/170] Add Extension tests (#2125) * Add test for extension hooks Add afterConfigureGraph callback * fix comment --- tests-ui/tests/extensions.test.js | 196 ++++++++++++++++++++++++++++++ 
tests-ui/utils/index.js | 7 +- web/scripts/app.js | 1 + 3 files changed, 201 insertions(+), 3 deletions(-) create mode 100644 tests-ui/tests/extensions.test.js diff --git a/tests-ui/tests/extensions.test.js b/tests-ui/tests/extensions.test.js new file mode 100644 index 000000000..b82e55c32 --- /dev/null +++ b/tests-ui/tests/extensions.test.js @@ -0,0 +1,196 @@ +// @ts-check +/// +const { start } = require("../utils"); +const lg = require("../utils/litegraph"); + +describe("extensions", () => { + beforeEach(() => { + lg.setup(global); + }); + + afterEach(() => { + lg.teardown(global); + }); + + it("calls each extension hook", async () => { + const mockExtension = { + name: "TestExtension", + init: jest.fn(), + setup: jest.fn(), + addCustomNodeDefs: jest.fn(), + getCustomWidgets: jest.fn(), + beforeRegisterNodeDef: jest.fn(), + registerCustomNodes: jest.fn(), + loadedGraphNode: jest.fn(), + nodeCreated: jest.fn(), + beforeConfigureGraph: jest.fn(), + afterConfigureGraph: jest.fn(), + }; + + const { app, ez, graph } = await start({ + async preSetup(app) { + app.registerExtension(mockExtension); + }, + }); + + // Basic initialisation hooks should be called once, with app + expect(mockExtension.init).toHaveBeenCalledTimes(1); + expect(mockExtension.init).toHaveBeenCalledWith(app); + + // Adding custom node defs should be passed the full list of nodes + expect(mockExtension.addCustomNodeDefs).toHaveBeenCalledTimes(1); + expect(mockExtension.addCustomNodeDefs.mock.calls[0][1]).toStrictEqual(app); + const defs = mockExtension.addCustomNodeDefs.mock.calls[0][0]; + expect(defs).toHaveProperty("KSampler"); + expect(defs).toHaveProperty("LoadImage"); + + // Get custom widgets is called once and should return new widget types + expect(mockExtension.getCustomWidgets).toHaveBeenCalledTimes(1); + expect(mockExtension.getCustomWidgets).toHaveBeenCalledWith(app); + + // Before register node def will be called once per node type + const nodeNames = Object.keys(defs); + const nodeCount = nodeNames.length; + expect(mockExtension.beforeRegisterNodeDef).toHaveBeenCalledTimes(nodeCount); + for (let i = 0; i < nodeCount; i++) { + // It should be send the JS class and the original JSON definition + const nodeClass = mockExtension.beforeRegisterNodeDef.mock.calls[i][0]; + const nodeDef = mockExtension.beforeRegisterNodeDef.mock.calls[i][1]; + + expect(nodeClass.name).toBe("ComfyNode"); + expect(nodeClass.comfyClass).toBe(nodeNames[i]); + expect(nodeDef.name).toBe(nodeNames[i]); + expect(nodeDef).toHaveProperty("input"); + expect(nodeDef).toHaveProperty("output"); + } + + // Register custom nodes is called once after registerNode defs to allow adding other frontend nodes + expect(mockExtension.registerCustomNodes).toHaveBeenCalledTimes(1); + + // Before configure graph will be called here as the default graph is being loaded + expect(mockExtension.beforeConfigureGraph).toHaveBeenCalledTimes(1); + // it gets sent the graph data that is going to be loaded + const graphData = mockExtension.beforeConfigureGraph.mock.calls[0][0]; + + // A node created is fired for each node constructor that is called + expect(mockExtension.nodeCreated).toHaveBeenCalledTimes(graphData.nodes.length); + for (let i = 0; i < graphData.nodes.length; i++) { + expect(mockExtension.nodeCreated.mock.calls[i][0].type).toBe(graphData.nodes[i].type); + } + + // Each node then calls loadedGraphNode to allow them to be updated + expect(mockExtension.loadedGraphNode).toHaveBeenCalledTimes(graphData.nodes.length); + for (let i = 0; i < 
graphData.nodes.length; i++) { + expect(mockExtension.loadedGraphNode.mock.calls[i][0].type).toBe(graphData.nodes[i].type); + } + + // After configure is then called once all the setup is done + expect(mockExtension.afterConfigureGraph).toHaveBeenCalledTimes(1); + + expect(mockExtension.setup).toHaveBeenCalledTimes(1); + expect(mockExtension.setup).toHaveBeenCalledWith(app); + + // Ensure hooks are called in the correct order + const callOrder = [ + "init", + "addCustomNodeDefs", + "getCustomWidgets", + "beforeRegisterNodeDef", + "registerCustomNodes", + "beforeConfigureGraph", + "nodeCreated", + "loadedGraphNode", + "afterConfigureGraph", + "setup", + ]; + for (let i = 1; i < callOrder.length; i++) { + const fn1 = mockExtension[callOrder[i - 1]]; + const fn2 = mockExtension[callOrder[i]]; + expect(fn1.mock.invocationCallOrder[0]).toBeLessThan(fn2.mock.invocationCallOrder[0]); + } + + graph.clear(); + + // Ensure adding a new node calls the correct callback + ez.LoadImage(); + expect(mockExtension.loadedGraphNode).toHaveBeenCalledTimes(graphData.nodes.length); + expect(mockExtension.nodeCreated).toHaveBeenCalledTimes(graphData.nodes.length + 1); + expect(mockExtension.nodeCreated.mock.lastCall[0].type).toBe("LoadImage"); + + // Reload the graph to ensure correct hooks are fired + await graph.reload(); + + // These hooks should not be fired again + expect(mockExtension.init).toHaveBeenCalledTimes(1); + expect(mockExtension.addCustomNodeDefs).toHaveBeenCalledTimes(1); + expect(mockExtension.getCustomWidgets).toHaveBeenCalledTimes(1); + expect(mockExtension.registerCustomNodes).toHaveBeenCalledTimes(1); + expect(mockExtension.beforeRegisterNodeDef).toHaveBeenCalledTimes(nodeCount); + expect(mockExtension.setup).toHaveBeenCalledTimes(1); + + // These should be called again + expect(mockExtension.beforeConfigureGraph).toHaveBeenCalledTimes(2); + expect(mockExtension.nodeCreated).toHaveBeenCalledTimes(graphData.nodes.length + 2); + expect(mockExtension.loadedGraphNode).toHaveBeenCalledTimes(graphData.nodes.length + 1); + expect(mockExtension.afterConfigureGraph).toHaveBeenCalledTimes(2); + }); + + it("allows custom nodeDefs and widgets to be registered", async () => { + const widgetMock = jest.fn((node, inputName, inputData, app) => { + expect(node.constructor.comfyClass).toBe("TestNode"); + expect(inputName).toBe("test_input"); + expect(inputData[0]).toBe("CUSTOMWIDGET"); + expect(inputData[1]?.hello).toBe("world"); + expect(app).toStrictEqual(app); + + return { + widget: node.addWidget("button", inputName, "hello", () => {}), + }; + }); + + // Register our extension that adds a custom node + widget type + const mockExtension = { + name: "TestExtension", + addCustomNodeDefs: (nodeDefs) => { + nodeDefs["TestNode"] = { + output: [], + output_name: [], + output_is_list: [], + name: "TestNode", + display_name: "TestNode", + category: "Test", + input: { + required: { + test_input: ["CUSTOMWIDGET", { hello: "world" }], + }, + }, + }; + }, + getCustomWidgets: jest.fn(() => { + return { + CUSTOMWIDGET: widgetMock, + }; + }), + }; + + const { graph, ez } = await start({ + async preSetup(app) { + app.registerExtension(mockExtension); + }, + }); + + expect(mockExtension.getCustomWidgets).toBeCalledTimes(1); + + graph.clear(); + expect(widgetMock).toBeCalledTimes(0); + const node = ez.TestNode(); + expect(widgetMock).toBeCalledTimes(1); + + // Ensure our custom widget is created + expect(node.inputs.length).toBe(0); + expect(node.widgets.length).toBe(1); + const w = node.widgets[0].widget; + 
expect(w.name).toBe("test_input"); + expect(w.type).toBe("button"); + }); +}); diff --git a/tests-ui/utils/index.js b/tests-ui/utils/index.js index eeccdb3d9..3a018f566 100644 --- a/tests-ui/utils/index.js +++ b/tests-ui/utils/index.js @@ -4,11 +4,11 @@ const lg = require("./litegraph"); /** * - * @param { Parameters[0] & { resetEnv?: boolean } } config + * @param { Parameters[0] & { resetEnv?: boolean, preSetup?(app): Promise } } config * @returns */ -export async function start(config = undefined) { - if(config?.resetEnv) { +export async function start(config = {}) { + if(config.resetEnv) { jest.resetModules(); jest.resetAllMocks(); lg.setup(global); @@ -16,6 +16,7 @@ export async function start(config = undefined) { mockApi(config); const { app } = require("../../web/scripts/app"); + config.preSetup?.(app); await app.setup(); return { ...Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]), app }; } diff --git a/web/scripts/app.js b/web/scripts/app.js index a72e30027..861db16bd 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1654,6 +1654,7 @@ export class ComfyApp { if (missingNodeTypes.length) { this.showMissingNodesError(missingNodeTypes); } + await this.#invokeExtensionsAsync("afterConfigureGraph", missingNodeTypes); } /** From 2995a2472541cb10ce9f4934baef9d5993ed3306 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 1 Dec 2023 18:29:33 -0500 Subject: [PATCH 164/170] Update readme. --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index af1f22811..450a012bb 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git |---------------------------|--------------------------------------------------------------------------------------------------------------------| | Ctrl + Enter | Queue up current graph for generation | | Ctrl + Shift + Enter | Queue up current graph as first for generation | +| Ctrl + Z/Ctrl + Y | Undo/Redo | | Ctrl + S | Save workflow | | Ctrl + O | Load workflow | | Ctrl + A | Select all nodes | @@ -100,6 +101,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins ```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6``` This is the command to install the nightly with ROCm 5.7 that might have some performance improvements: + ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.7``` ### NVIDIA @@ -192,7 +194,7 @@ To use a textual inversion concepts/embeddings in a text prompt put them in the Make sure you use the regular loaders/Load Checkpoint node to load checkpoints. It will auto pick the right settings depending on your GPU. -You can set this command line setting to disable the upcasting to fp32 in some cross attention operations which will increase your speed. Note that this will very likely give you black images on SD2.x models. If you use xformers this option does not do anything. +You can set this command line setting to disable the upcasting to fp32 in some cross attention operations which will increase your speed. Note that this will very likely give you black images on SD2.x models. If you use xformers or pytorch attention this option does not do anything. 
```--dont-upcast-attention``` From 28220fa8392b6b2f0eabb0d0c8311ff3b07af69a Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 2 Dec 2023 12:02:03 +0000 Subject: [PATCH 165/170] Fix node growing with DOM widgets when adding image even if enough space --- web/scripts/app.js | 2 +- web/scripts/domWidget.js | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 861db16bd..8598d4478 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -411,7 +411,7 @@ export class ComfyApp { node.prototype.setSizeForImage = function (force) { if(!force && this.animatedImages) return; - if (this.inputHeight) { + if (this.inputHeight || this.freeWidgetSpace > 210) { this.setSize(this.size); return; } diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 37d26f3c5..e919428a0 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -120,6 +120,8 @@ function computeSize(size) { freeSpace -= 220; } + this.freeWidgetSpace = freeSpace; + if (freeSpace < 0) { // Not enough space for all widgets so we need to grow size[1] -= freeSpace; From b2517b4ceb2e0fdea438586d1c8883eccab1f9bf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 2 Dec 2023 13:56:11 -0500 Subject: [PATCH 166/170] Load api workflow if regular workflow isn't in loaded image. --- web/scripts/app.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index d3049058a..dc0f2c3c9 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1878,6 +1878,8 @@ export class ComfyApp { if (pngInfo) { if (pngInfo.workflow) { await this.loadGraphData(JSON.parse(pngInfo.workflow)); + } else if (pngInfo.prompt) { + this.loadApiJson(JSON.parse(pngInfo.prompt)); } else if (pngInfo.parameters) { importA1111(this.graph, pngInfo.parameters); } @@ -1889,6 +1891,8 @@ export class ComfyApp { this.loadGraphData(JSON.parse(pngInfo.workflow)); } else if (pngInfo.Workflow) { this.loadGraphData(JSON.parse(pngInfo.Workflow)); // Support loading workflows from that webp custom node. + } else if (pngInfo.prompt) { + this.loadApiJson(JSON.parse(pngInfo.prompt)); } } } else if (file.type === "application/json" || file.name?.endsWith(".json")) { @@ -1908,6 +1912,8 @@ export class ComfyApp { const info = await getLatentMetadata(file); if (info.workflow) { await this.loadGraphData(JSON.parse(info.workflow)); + } else if (info.prompt) { + this.loadApiJson(JSON.parse(info.prompt)); } } } From 61a123a1e083c584a333874b89828125171f7635 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 3 Dec 2023 03:31:47 -0500 Subject: [PATCH 167/170] A different way of handling multiple images passed to SVD. 
Previously, when a list of 3 images [0, 1, 2] was used for a 6-frame video, they were concatenated like this: [0, 1, 2, 0, 1, 2]. Now they are concatenated like this: [0, 0, 1, 1, 2, 2]. --- comfy/model_base.py | 2 +- comfy/utils.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 786c9cf47..253ea6667 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -303,7 +303,7 @@ class SVD_img2vid(BaseModel): if latent_image.shape[1:] != noise.shape[1:]: latent_image = utils.common_upscale(latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center") - latent_image = utils.repeat_to_batch_size(latent_image, noise.shape[0]) + latent_image = utils.resize_to_batch_size(latent_image, noise.shape[0]) out['c_concat'] = comfy.conds.CONDNoiseShape(latent_image) diff --git a/comfy/utils.py b/comfy/utils.py index 294bbb425..505577047 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -239,6 +239,26 @@ def repeat_to_batch_size(tensor, batch_size): return tensor.repeat([math.ceil(batch_size / tensor.shape[0])] + [1] * (len(tensor.shape) - 1))[:batch_size] return tensor +def resize_to_batch_size(tensor, batch_size): + in_batch_size = tensor.shape[0] + if in_batch_size == batch_size: + return tensor + + if batch_size <= 1: + return tensor[:batch_size] + + output = torch.empty([batch_size] + list(tensor.shape)[1:], dtype=tensor.dtype, device=tensor.device) + if batch_size < in_batch_size: + scale = (in_batch_size - 1) / (batch_size - 1) + for i in range(batch_size): + output[i] = tensor[min(round(i * scale), in_batch_size - 1)] + else: + scale = in_batch_size / batch_size + for i in range(batch_size): + output[i] = tensor[min(math.floor((i + 0.5) * scale), in_batch_size - 1)] + + return output + def convert_sd_to(state_dict, dtype): keys = list(state_dict.keys()) for k in keys: From 496de0891d9b7ae37c6dbbc68c5e22802302dc8f Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 3 Dec 2023 16:49:48 +0000 Subject: [PATCH 168/170] Allow removing erroring embedded groups Unregister group nodes on workflow change --- web/extensions/core/groupNode.js | 29 ++++++++++++++++++++++++++++- web/scripts/app.js | 28 +++++++++++++++++++++++--- web/style.css | 5 +++++ 3 files changed, 58 insertions(+), 4 deletions(-) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 4b4bf74fa..6766f356d 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -475,6 +475,16 @@ export class GroupNodeConfig { } static async registerFromWorkflow(groupNodes, missingNodeTypes) { + const clean = app.clean; + app.clean = function () { + for (const g in groupNodes) { + try { + LiteGraph.unregisterNodeType("workflow/" + g); + } catch (error) {} + } + app.clean = clean; + }; + for (const g in groupNodes) { const groupData = groupNodes[g]; @@ -482,7 +492,24 @@ for (const n of groupData.nodes) { // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { - missingNodeTypes.push(n.type); + missingNodeTypes.push({ + type: n.type, + hint: ` (In group node 'workflow/${g}')`, + }); + + missingNodeTypes.push({ + type: "workflow/" + g, + action: { + text: "Remove from workflow", + callback: (e) => { + delete groupNodes[g]; + e.target.textContent = "Removed"; + e.target.style.pointerEvents = "none"; + e.target.style.opacity = 0.7; + }, + }, + }); + hasMissing = true; } } diff --git a/web/scripts/app.js 
b/web/scripts/app.js index dc0f2c3c9..b3a22f300 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1519,14 +1519,36 @@ export class ComfyApp { } showMissingNodesError(missingNodeTypes, hasAddedNodes = true) { + let seenTypes = new Set(); + this.ui.dialog.show( - $el("div", [ + $el("div.comfy-missing-nodes", [ $el("span", { textContent: "When loading the graph, the following node types were not found: " }), $el( "ul", - Array.from(new Set(missingNodeTypes)).map((t) => $el("li", { textContent: t })) + Array.from(new Set(missingNodeTypes)).map((t) => { + let children = []; + if (typeof t === "object") { + if(seenTypes.has(t.type)) return null; + seenTypes.add(t.type); + children.push($el("span", { textContent: t.type })); + if (t.hint) { + children.push($el("span", { textContent: t.hint })); + } + if (t.action) { + children.push($el("button", { onclick: t.action.callback, textContent: t.action.text })); + } + } else { + if(seenTypes.has(t)) return null; + seenTypes.add(t); + children.push($el("span", { textContent: t })); + } + return $el("li", children); + }).filter(Boolean) ), - ...(hasAddedNodes ? [$el("span", { textContent: "Nodes that have failed to load will show as red on the graph." })] : []), + ...(hasAddedNodes + ? [$el("span", { textContent: "Nodes that have failed to load will show as red on the graph." })] + : []), ]) ); this.logging.addEntry("Comfy.App", "warn", { diff --git a/web/style.css b/web/style.css index 378fe0a48..630eea12e 100644 --- a/web/style.css +++ b/web/style.css @@ -424,6 +424,11 @@ dialog::backdrop { height: var(--comfy-img-preview-height); } +.comfy-missing-nodes li button { + font-size: 12px; + margin-left: 5px; +} + /* Search box */ .litegraph.litesearchbox { From 44d8abadf08e0dcef3fb97d66d46e7cabc160e60 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 3 Dec 2023 17:04:16 +0000 Subject: [PATCH 169/170] allow muting group node --- web/scripts/app.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index b3a22f300..5faf41fb3 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1704,7 +1704,8 @@ export class ComfyApp { const output = {}; // Process nodes in order of execution for (const outerNode of this.graph.computeExecutionOrder(false)) { - const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode]; + const skipNode = outerNode.mode === 2 || outerNode.mode === 4; + const innerNodes = (!skipNode && outerNode.getInnerNodes) ? outerNode.getInnerNodes() : [outerNode]; for (const node of innerNodes) { if (node.isVirtualNode) { continue; From 77ab2c3f699ca35d19060d8add6bccd66a1b532f Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 3 Dec 2023 17:17:23 +0000 Subject: [PATCH 170/170] fix template sorting --- web/extensions/core/nodeTemplates.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 2d4821742..bc9a10864 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -164,7 +164,7 @@ class ManageTemplates extends ComfyDialog { var prev_i = el.dataset.id; if ( el == this.draggedEl && prev_i != i ) { - [this.templates[i], this.templates[prev_i]] = [this.templates[prev_i], this.templates[i]]; + this.templates.splice(i, 0, this.templates.splice(prev_i, 1)[0]); } el.dataset.id = i; });
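A note on the one-line sorting fix in the last hunk: a pairwise swap only reorders correctly when the dragged template moves a single position; across several rows it exchanges the dragged entry with whatever sits at the target index and scrambles the list. The splice removes the entry at its old index and reinserts it at the new one, shifting everything in between by one. The difference, sketched in Python with illustrative names:

```python
def reorder_swap(items, prev_i, i):
    # old behaviour: exchange the two entries
    items[i], items[prev_i] = items[prev_i], items[i]

def reorder_splice(items, prev_i, i):
    # fixed behaviour: remove at prev_i, reinsert at i
    items.insert(i, items.pop(prev_i))

a = ["t0", "t1", "t2", "t3"]
b = ["t0", "t1", "t2", "t3"]
reorder_swap(a, 3, 0)    # ['t3', 't1', 't2', 't0'] -- t0 is flung to the end
reorder_splice(b, 3, 0)  # ['t3', 't0', 't1', 't2'] -- the rest shift by one
print(a, b)
```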