From e1c6dc720e01cc9f33adef388361a001297dca43 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Mon, 16 Jun 2025 10:43:52 -0700
Subject: [PATCH 01/11] Allow setting min_length with tokenizer_data. (#8547)

---
 comfy/sd1_clip.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index ac61babe9..1b69a4103 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -462,7 +462,7 @@ class SDTokenizer:
             tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
         self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
         self.max_length = tokenizer_data.get("{}_max_length".format(embedding_key), max_length)
-        self.min_length = min_length
+        self.min_length = tokenizer_data.get("{}_min_length".format(embedding_key), min_length)
         self.end_token = None
         self.min_padding = min_padding
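Note on PATCH 01: the minimum token length can now be overridden per model through tokenizer_data, using the same "{embedding_key}_..." key convention as the max_length lookup on the line above it. A minimal sketch of the lookup, with a hypothetical tokenizer_data dict and "clip_l" standing in for embedding_key:

```python
# Hypothetical values for illustration; the real dict is assembled by the model loader.
tokenizer_data = {"clip_l_max_length": 77, "clip_l_min_length": 77}

min_length = tokenizer_data.get("{}_min_length".format("clip_l"), 0)
print(min_length)  # 77; falls back to the default (the min_length argument) when the key is absent
```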
From 8e81c507d2124b6c04993e3dbe3df9f40573f814 Mon Sep 17 00:00:00 2001
From: chaObserv <154517000+chaObserv@users.noreply.github.com>
Date: Tue, 17 Jun 2025 02:47:10 +0800
Subject: [PATCH 02/11] Multistep DPM++ SDE samplers for RF (#8541)

Include alpha in sampling and minor refactoring
---
 comfy/k_diffusion/sampling.py | 50 ++++++++++++++++++++++++-----------
 1 file changed, 34 insertions(+), 16 deletions(-)

diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py
index a8fd98493..8030048fc 100644
--- a/comfy/k_diffusion/sampling.py
+++ b/comfy/k_diffusion/sampling.py
@@ -781,6 +781,7 @@ def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=No
         old_denoised = denoised
     return x

+
 @torch.no_grad()
 def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
     """DPM-Solver++(2M) SDE."""
@@ -796,9 +797,12 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
         noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
     s_in = x.new_ones([x.shape[0]])

+    model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling')
+    lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling)
+    sigmas = offset_first_sigma_for_snr(sigmas, model_sampling)
+
     old_denoised = None
-    h_last = None
-    h = None
+    h, h_last = None, None

     for i in trange(len(sigmas) - 1, disable=disable):
         denoised = model(x, sigmas[i] * s_in, **extra_args)
@@ -809,26 +813,29 @@ def sample_dpmpp_2m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
             x = denoised
         else:
             # DPM-Solver++(2M) SDE
-            t, s = -sigmas[i].log(), -sigmas[i + 1].log()
-            h = s - t
-            eta_h = eta * h
+            lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1])
+            h = lambda_t - lambda_s
+            h_eta = h * (eta + 1)

-            x = sigmas[i + 1] / sigmas[i] * (-eta_h).exp() * x + (-h - eta_h).expm1().neg() * denoised
+            alpha_t = sigmas[i + 1] * lambda_t.exp()
+
+            x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x + alpha_t * (-h_eta).expm1().neg() * denoised

             if old_denoised is not None:
                 r = h_last / h
                 if solver_type == 'heun':
-                    x = x + ((-h - eta_h).expm1().neg() / (-h - eta_h) + 1) * (1 / r) * (denoised - old_denoised)
+                    x = x + alpha_t * ((-h_eta).expm1().neg() / (-h_eta) + 1) * (1 / r) * (denoised - old_denoised)
                 elif solver_type == 'midpoint':
-                    x = x + 0.5 * (-h - eta_h).expm1().neg() * (1 / r) * (denoised - old_denoised)
+                    x = x + 0.5 * alpha_t * (-h_eta).expm1().neg() * (1 / r) * (denoised - old_denoised)

-            if eta:
-                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * eta_h).expm1().neg().sqrt() * s_noise
+            if eta > 0 and s_noise > 0:
+                x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise

         old_denoised = denoised
         h_last = h
     return x

+
 @torch.no_grad()
 def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
     """DPM-Solver++(3M) SDE."""
@@ -842,6 +849,10 @@ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
         noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=True) if noise_sampler is None else noise_sampler
     s_in = x.new_ones([x.shape[0]])

+    model_sampling = model.inner_model.model_patcher.get_model_object('model_sampling')
+    lambda_fn = partial(sigma_to_half_log_snr, model_sampling=model_sampling)
+    sigmas = offset_first_sigma_for_snr(sigmas, model_sampling)
+
     denoised_1, denoised_2 = None, None
     h, h_1, h_2 = None, None, None

@@ -853,13 +864,16 @@ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
             # Denoising step
             x = denoised
         else:
-            t, s = -sigmas[i].log(), -sigmas[i + 1].log()
-            h = s - t
+            lambda_s, lambda_t = lambda_fn(sigmas[i]), lambda_fn(sigmas[i + 1])
+            h = lambda_t - lambda_s
             h_eta = h * (eta + 1)

-            x = torch.exp(-h_eta) * x + (-h_eta).expm1().neg() * denoised
+            alpha_t = sigmas[i + 1] * lambda_t.exp()
+
+            x = sigmas[i + 1] / sigmas[i] * (-h * eta).exp() * x + alpha_t * (-h_eta).expm1().neg() * denoised

             if h_2 is not None:
+                # DPM-Solver++(3M) SDE
                 r0 = h_1 / h
                 r1 = h_2 / h
                 d1_0 = (denoised - denoised_1) / r0
@@ -868,20 +882,22 @@ def sample_dpmpp_3m_sde(model, x, sigmas, extra_args=None, callback=None, disabl
                 d2 = (d1_0 - d1_1) / (r0 + r1)
                 phi_2 = h_eta.neg().expm1() / h_eta + 1
                 phi_3 = phi_2 / h_eta - 0.5
-                x = x + phi_2 * d1 - phi_3 * d2
+                x = x + (alpha_t * phi_2) * d1 - (alpha_t * phi_3) * d2
             elif h_1 is not None:
+                # DPM-Solver++(2M) SDE
                 r = h_1 / h
                 d = (denoised - denoised_1) / r
                 phi_2 = h_eta.neg().expm1() / h_eta + 1
-                x = x + phi_2 * d
+                x = x + (alpha_t * phi_2) * d

-            if eta:
+            if eta > 0 and s_noise > 0:
                 x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * sigmas[i + 1] * (-2 * h * eta).expm1().neg().sqrt() * s_noise

         denoised_1, denoised_2 = denoised, denoised_1
         h_1, h_2 = h, h_1
     return x

+
 @torch.no_grad()
 def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
     if len(sigmas) <= 1:
@@ -891,6 +907,7 @@ def sample_dpmpp_3m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, di
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_3m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler)

+
 @torch.no_grad()
 def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, solver_type='midpoint'):
     if len(sigmas) <= 1:
@@ -900,6 +917,7 @@ def sample_dpmpp_2m_sde_gpu(model, x, sigmas, extra_args=None, callback=None, di
     noise_sampler = BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=extra_args.get("seed", None), cpu=False) if noise_sampler is None else noise_sampler
     return sample_dpmpp_2m_sde(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, eta=eta, s_noise=s_noise, noise_sampler=noise_sampler, solver_type=solver_type)

+
 @torch.no_grad()
 def sample_dpmpp_sde_gpu(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None, r=1 / 2):
     if len(sigmas) <= 1:
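Note on PATCH 02: the multistep SDE samplers now measure the step size h in half-log-SNR, lambda(sigma) = log(alpha_sigma / sigma), rather than t = -log(sigma). For epsilon-prediction models alpha is 1 and the two coincide; for rectified-flow (RF) models alpha = 1 - sigma, and the denoised term picks up the extra alpha_t factor, recovered as sigma_t * exp(lambda_t). A minimal sketch of that identity, assuming an RF schedule (in the diff, the sigma_to_half_log_snr and offset_first_sigma_for_snr helpers derive everything from the model's model_sampling object):

```python
import torch

def half_log_snr(sigma, alpha):
    # lambda = log(alpha / sigma); with alpha == 1 this reduces to the old -log(sigma)
    return alpha.log() - sigma.log()

sigma = torch.tensor(0.6)
alpha = 1 - sigma                 # rectified flow: x_t = alpha * x_0 + sigma * noise
lam = half_log_snr(sigma, alpha)

alpha_t = sigma * lam.exp()       # how the samplers recover alpha_t from lambda_t
assert torch.allclose(alpha_t, alpha)
```

The `eta > 0 and s_noise > 0` guard also makes the deterministic case explicit: when either knob is zero the Brownian increment contributes nothing, so the noise-sampler call is skipped entirely.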
From 483b3e62e00624fc52da8ad67e88f863abe975d2 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 16 Jun 2025 23:34:46 -0400
Subject: [PATCH 03/11] ComfyUI version v0.3.41

---
 comfyui_version.py | 2 +-
 pyproject.toml     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index 6962c3661..fedd3466f 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.40"
+__version__ = "0.3.41"
diff --git a/pyproject.toml b/pyproject.toml
index 03841bc94..c572ad4c6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.40"
+version = "0.3.41"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"

From 4459a17e828eb8f96ffe07e3a08d68aff77b4da0 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 17 Jun 2025 02:18:01 -0700
Subject: [PATCH 04/11] Add Cosmos Predict2 to README. (#8562)

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 9a35ab7ea..0de4a6bb5 100644
--- a/README.md
+++ b/README.md
@@ -65,12 +65,13 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith
    - [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)
    - [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/)
    - [HiDream](https://comfyanonymous.github.io/ComfyUI_examples/hidream/)
+   - [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/)
 - Video Models
    - [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/)
    - [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/)
    - [LTX-Video](https://comfyanonymous.github.io/ComfyUI_examples/ltxv/)
    - [Hunyuan Video](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/)
-   - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/)
+   - [Nvidia Cosmos](https://comfyanonymous.github.io/ComfyUI_examples/cosmos/) and [Cosmos Predict2](https://comfyanonymous.github.io/ComfyUI_examples/cosmos_predict2/)
    - [Wan 2.1](https://comfyanonymous.github.io/ComfyUI_examples/wan/)
 - Audio Models
    - [Stable Audio](https://comfyanonymous.github.io/ComfyUI_examples/audio/)

From cd88f709ab5fbe8fec0d2b242691fef826ba038a Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Tue, 17 Jun 2025 19:11:59 +0800
Subject: [PATCH 05/11] Update template version (#8563)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 336ec9d57..910634d87 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.21.7
-comfyui-workflow-templates==0.1.28
+comfyui-workflow-templates==0.1.29
 comfyui-embedded-docs==0.2.2
 torch
 torchsde

From d7430c529a586ba4005bc46ba10ce02f71dba0d8 Mon Sep 17 00:00:00 2001
From: filtered <176114999+webfiltered@users.noreply.github.com>
Date: Tue, 17 Jun 2025 15:58:28 -0700
Subject: [PATCH 06/11] Update frontend to 1.22.2 (#8567)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 910634d87..15fde2849 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.21.7
+comfyui-frontend-package==1.22.2
 comfyui-workflow-templates==0.1.29
 comfyui-embedded-docs==0.2.2
 torch

From e9e9a031a88f9cc4845b3322d4bae771d2854472 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 18 Jun 2025 03:55:21 -0700
Subject: [PATCH 07/11] Show a better error when the workflow OOMs. (#8574)

---
 execution.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/execution.py b/execution.py
index d0012afda..f6006fa12 100644
--- a/execution.py
+++ b/execution.py
@@ -429,17 +429,20 @@ def execute(server, dynprompt, caches, current_item, extra_data, executed, promp
         logging.error(f"!!! Exception during processing !!! {ex}")
         logging.error(traceback.format_exc())

+        tips = ""
+
+        if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
+            tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number."
+            logging.error("Got an OOM, unloading all loaded models.")
+            comfy.model_management.unload_all_models()
+
         error_details = {
             "node_id": real_node_id,
-            "exception_message": str(ex),
+            "exception_message": "{}\n{}".format(ex, tips),
             "exception_type": exception_type,
             "traceback": traceback.format_tb(tb),
             "current_inputs": input_data_formatted
         }
-        if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
-            logging.error("Got an OOM, unloading all loaded models.")
-            comfy.model_management.unload_all_models()

         return (ExecutionResult.FAILURE, error_details, ex)
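Note on PATCH 07: the OOM branch now runs before error_details is assembled, so the hint text reaches the message shown to the user instead of only the server log, and models are unloaded before the error is returned. A self-contained toy of the message assembly (the exception class here is a stand-in for comfy.model_management.OOM_EXCEPTION):

```python
class OutOfMemoryError(Exception):  # stand-in for comfy.model_management.OOM_EXCEPTION
    pass

def build_message(ex):
    tips = ""
    if isinstance(ex, OutOfMemoryError):
        tips = ("This error means you ran out of memory on your GPU.\n\n"
                "TIPS: If the workflow worked before you might have accidentally "
                "set the batch_size to a large number.")
    return "{}\n{}".format(ex, tips)

print(build_message(OutOfMemoryError("CUDA out of memory")))  # message plus the tip
print(build_message(ValueError("bad input")))                 # non-OOM: no tip appended
```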
From 5b12b55e32aa2aa8fd47d545265a974d3b01ac7c Mon Sep 17 00:00:00 2001
From: coderfromthenorth93
Date: Wed, 18 Jun 2025 15:12:29 -0400
Subject: [PATCH 08/11] Add new fields to the config types (#8507)

---
 comfy_config/config_parser.py | 55 +++++++++++++++++++++++++++++++++++
 comfy_config/types.py         |  6 +++-
 2 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/comfy_config/config_parser.py b/comfy_config/config_parser.py
index a9cbd94dd..8da7bd901 100644
--- a/comfy_config/config_parser.py
+++ b/comfy_config/config_parser.py
@@ -11,6 +11,43 @@ from comfy_config.types import (
     PyProjectSettings
 )

+def validate_and_extract_os_classifiers(classifiers: list) -> list:
+    os_classifiers = [c for c in classifiers if c.startswith("Operating System :: ")]
+    if not os_classifiers:
+        return []
+
+    os_values = [c[len("Operating System :: ") :] for c in os_classifiers]
+    valid_os_prefixes = {"Microsoft", "POSIX", "MacOS", "OS Independent"}
+
+    for os_value in os_values:
+        if not any(os_value.startswith(prefix) for prefix in valid_os_prefixes):
+            return []
+
+    return os_values
+
+
+def validate_and_extract_accelerator_classifiers(classifiers: list) -> list:
+    accelerator_classifiers = [c for c in classifiers if c.startswith("Environment ::")]
+    if not accelerator_classifiers:
+        return []
+
+    accelerator_values = [c[len("Environment :: ") :] for c in accelerator_classifiers]
+
+    valid_accelerators = {
+        "GPU :: NVIDIA CUDA",
+        "GPU :: AMD ROCm",
+        "GPU :: Intel Arc",
+        "NPU :: Huawei Ascend",
+        "GPU :: Apple Metal",
+    }
+
+    for accelerator_value in accelerator_values:
+        if accelerator_value not in valid_accelerators:
+            return []
+
+    return accelerator_values
+
+
 """
 Extract configuration from a custom node directory's pyproject.toml file or a Python file.
@@ -78,6 +115,24 @@ def extract_node_configuration(path) -> Optional[PyProjectConfig]:
     tool_data = raw_settings.tool
     comfy_data = tool_data.get("comfy", {}) if tool_data else {}

+    dependencies = project_data.get("dependencies", [])
+    supported_comfyui_frontend_version = ""
+    for dep in dependencies:
+        if isinstance(dep, str) and dep.startswith("comfyui-frontend-package"):
+            supported_comfyui_frontend_version = dep.removeprefix("comfyui-frontend-package")
+            break
+
+    supported_comfyui_version = comfy_data.get("requires-comfyui", "")
+
+    classifiers = project_data.get('classifiers', [])
+    supported_os = validate_and_extract_os_classifiers(classifiers)
+    supported_accelerators = validate_and_extract_accelerator_classifiers(classifiers)
+
+    project_data['supported_os'] = supported_os
+    project_data['supported_accelerators'] = supported_accelerators
+    project_data['supported_comfyui_frontend_version'] = supported_comfyui_frontend_version
+    project_data['supported_comfyui_version'] = supported_comfyui_version
+
     return PyProjectConfig(project=project_data, tool_comfy=comfy_data)
diff --git a/comfy_config/types.py b/comfy_config/types.py
index 5222cc59b..59448466b 100644
--- a/comfy_config/types.py
+++ b/comfy_config/types.py
@@ -51,7 +51,7 @@ class ComfyConfig(BaseModel):
     models: List[Model] = Field(default_factory=list, alias="Models")
     includes: List[str] = Field(default_factory=list)
     web: Optional[str] = None
-
+    banner_url: str = ""

 class License(BaseModel):
     file: str = ""
@@ -66,6 +66,10 @@ class ProjectConfig(BaseModel):
     dependencies: List[str] = Field(default_factory=list)
     license: License = Field(default_factory=License)
     urls: URLs = Field(default_factory=URLs)
+    supported_os: List[str] = Field(default_factory=list)
+    supported_accelerators: List[str] = Field(default_factory=list)
+    supported_comfyui_version: str = ""
+    supported_comfyui_frontend_version: str = ""

     @field_validator('license', mode='before')
     @classmethod
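Note on PATCH 08: both helper functions are all-or-nothing on purpose: a single unrecognized classifier empties the whole list, so malformed metadata degrades to "unspecified" rather than propagating bad values. A quick illustration using the two helpers added above (the classifier strings are hypothetical pyproject.toml entries):

```python
classifiers = [
    "Operating System :: OS Independent",
    "Environment :: GPU :: NVIDIA CUDA",
    "Programming Language :: Python :: 3",   # ignored: neither OS nor accelerator
]
print(validate_and_extract_os_classifiers(classifiers))           # ['OS Independent']
print(validate_and_extract_accelerator_classifiers(classifiers))  # ['GPU :: NVIDIA CUDA']

# One unknown value voids the whole group:
print(validate_and_extract_os_classifiers(["Operating System :: TempleOS"]))  # []
```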
From 91d40086db7956aabedef5cfcae0f6821529a3d1 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 19 Jun 2025 08:04:52 -0700
Subject: [PATCH 09/11] Fix pytorch warning. (#8593)

---
 comfy/ldm/modules/sub_quadratic_attention.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py
index 21c72373f..fab145f1c 100644
--- a/comfy/ldm/modules/sub_quadratic_attention.py
+++ b/comfy/ldm/modules/sub_quadratic_attention.py
@@ -31,7 +31,7 @@ def dynamic_slice(
     starts: List[int],
     sizes: List[int],
 ) -> Tensor:
-    slicing = [slice(start, start + size) for start, size in zip(starts, sizes)]
+    slicing = tuple(slice(start, start + size) for start, size in zip(starts, sizes))
     return x[slicing]

 class AttnChunk(NamedTuple):
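Note on PATCH 09: indexing a tensor with a Python list of slices is deprecated advanced indexing in PyTorch and triggers a UserWarning (and may become an error in future releases); a tuple of slices is the supported spelling and selects the same block. A small sketch:

```python
import torch

x = torch.arange(16).reshape(4, 4)
slices = [slice(1, 3), slice(0, 2)]

y = x[tuple(slices)]  # same 2x2 block as x[1:3, 0:2], without the deprecation warning
print(y)              # tensor([[4, 5], [8, 9]])
```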
From 7e9267fa77c93355dd3bdf05b6cb8f02d41af5ae Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 19 Jun 2025 15:50:05 -0700
Subject: [PATCH 10/11] Make flux controlnet work with sd3 text enc. (#8599)

---
 comfy/ldm/flux/controlnet.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy/ldm/flux/controlnet.py b/comfy/ldm/flux/controlnet.py
index dbd2a47c0..7dcf82bbf 100644
--- a/comfy/ldm/flux/controlnet.py
+++ b/comfy/ldm/flux/controlnet.py
@@ -123,6 +123,8 @@ class ControlNetFlux(Flux):

         if y is None:
             y = torch.zeros((img.shape[0], self.params.vec_in_dim), device=img.device, dtype=img.dtype)
+        else:
+            y = y[:, :self.params.vec_in_dim]

         # running on sequences img
         img = self.img_in(img)

From f7fb1937127a8ed011b99424598c9ab1e8565112 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Fri, 20 Jun 2025 02:37:32 -0700
Subject: [PATCH 11/11] Small flux optimization. (#8611)

---
 comfy/ldm/flux/layers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/ldm/flux/layers.py b/comfy/ldm/flux/layers.py
index 76af967e6..113eb2096 100644
--- a/comfy/ldm/flux/layers.py
+++ b/comfy/ldm/flux/layers.py
@@ -118,7 +118,7 @@ class Modulation(nn.Module):
 def apply_mod(tensor, m_mult, m_add=None, modulation_dims=None):
     if modulation_dims is None:
         if m_add is not None:
-            return tensor * m_mult + m_add
+            return torch.addcmul(m_add, tensor, m_mult)
         else:
             return tensor * m_mult
     else:
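Note on PATCH 11: torch.addcmul(m_add, tensor, m_mult) computes m_add + tensor * m_mult in a single fused kernel, avoiding the intermediate tensor that tensor * m_mult + m_add materializes, a small but free win on the modulation path these flux layers run for every block. A quick equivalence check:

```python
import torch

t, m_mult, m_add = torch.randn(2, 4), torch.randn(2, 4), torch.randn(2, 4)

fused = torch.addcmul(m_add, t, m_mult)  # one kernel, no temporary
naive = t * m_mult + m_add               # mul kernel + add kernel + a temporary
assert torch.allclose(fused, naive)
```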