From c16db7fd6978eee76fca65626c02e61eaedb5e72 Mon Sep 17 00:00:00 2001
From: Comfy Org PR Bot
Date: Wed, 15 Apr 2026 03:13:35 +0900
Subject: [PATCH 1/7] Bump comfyui-frontend-package to 1.42.11 (#13398)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 7e8dac182..7f065e0d4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.42.10
+comfyui-frontend-package==1.42.11
 comfyui-workflow-templates==0.9.50
 comfyui-embedded-docs==0.4.3
 torch

From c5569e862794c419094ee5c9d5ad224634b9ddd6 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 14 Apr 2026 11:42:23 -0700
Subject: [PATCH 2/7] Add string output to preview text node. (#13406)

---
 comfy_extras/nodes_preview_any.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy_extras/nodes_preview_any.py b/comfy_extras/nodes_preview_any.py
index b0a6f279d..0a1558f2b 100644
--- a/comfy_extras/nodes_preview_any.py
+++ b/comfy_extras/nodes_preview_any.py
@@ -11,7 +11,7 @@ class PreviewAny():
             "required": {"source": (IO.ANY, {})},
         }
 
-    RETURN_TYPES = ()
+    RETURN_TYPES = (IO.STRING,)
     FUNCTION = "main"
     OUTPUT_NODE = True
 
@@ -33,7 +33,7 @@ class PreviewAny():
         except Exception:
             value = 'source exists, but could not be serialized.'
 
-        return {"ui": {"text": (value,)}}
+        return {"ui": {"text": (value,)}, "result": (value,)}
 
 NODE_CLASS_MAPPINGS = {
     "PreviewAny": PreviewAny,

From 7ce3f64c784430e15731d344affffb48c55a0eaa Mon Sep 17 00:00:00 2001
From: "Daxiong (Lin)"
Date: Wed, 15 Apr 2026 08:35:27 +0800
Subject: [PATCH 3/7] Update workflow templates to v0.9.54 (#13412)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 7f065e0d4..e45a20aaf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.42.11
-comfyui-workflow-templates==0.9.50
+comfyui-workflow-templates==0.9.54
 comfyui-embedded-docs==0.4.3
 torch
 torchsde

From cb0bbde402cfb72559cc8b00f679d7735dff5c40 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 14 Apr 2026 19:54:47 -0700
Subject: [PATCH 4/7] Fix ernie on devices that don't support fp64. (#13414)

---
 comfy/ldm/ernie/model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/ldm/ernie/model.py b/comfy/ldm/ernie/model.py
index 3dbab8dc0..f7cdb51e6 100644
--- a/comfy/ldm/ernie/model.py
+++ b/comfy/ldm/ernie/model.py
@@ -15,7 +15,7 @@ def rope(pos: torch.Tensor, dim: int, theta: int) -> torch.Tensor:
     scale = torch.arange(0, dim, 2, dtype=torch.float64, device=device) / dim
     omega = 1.0 / (theta**scale)
 
-    out = torch.einsum("...n,d->...nd", pos, omega)
+    out = torch.einsum("...n,d->...nd", pos.to(device), omega)
     out = torch.stack([torch.cos(out), torch.sin(out)], dim=0)
     return out.to(dtype=torch.float32, device=pos.device)

From 8f374716ee98d378d403ebc61250e091ecd3a25c Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 14 Apr 2026 22:56:13 -0400
Subject: [PATCH 5/7] ComfyUI v0.19.1

---
 comfyui_version.py | 2 +-
 pyproject.toml     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_version.py b/comfyui_version.py
index 0da11d5fa..3c6dac3d9 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.19.0"
+__version__ = "0.19.1"
diff --git a/pyproject.toml b/pyproject.toml
index e8d4a9742..006ed9985 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.19.0"
+version = "0.19.1"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.10"

From 1de83f91c34a3396c667d764224054ba87027e82 Mon Sep 17 00:00:00 2001
From: Jun Yamog
Date: Wed, 15 Apr 2026 21:10:36 +1200
Subject: [PATCH 6/7] Fix OOM regression in _apply() for quantized models during inference (#13372)

Skip unnecessary clone of inference-mode tensors when already inside
torch.inference_mode(), matching the existing guard in set_attr_param.
The unconditional clone introduced in 20561aa9 caused transient VRAM
doubling during model movement for FP8/quantized models.
---
 comfy/ops.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/ops.py b/comfy/ops.py
index b5cd1d47e..7a9b4b84c 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -1151,7 +1151,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
                 if param is None:
                     continue
                 p = fn(param)
-                if p.is_inference():
+                if (not torch.is_inference_mode_enabled()) and p.is_inference():
                     p = p.clone()
                 self.register_parameter(key, torch.nn.Parameter(p, requires_grad=False))
             for key, buf in self._buffers.items():

From e9a2d1e4cc34ade8a655b386a3919a6d05aa290a Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 15 Apr 2026 19:59:08 -0700
Subject: [PATCH 7/7] Add a way to disable default template in text gen node. (#13424)

---
 comfy_extras/nodes_textgen.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/comfy_extras/nodes_textgen.py b/comfy_extras/nodes_textgen.py
index f1aeb63fa..eed26c582 100644
--- a/comfy_extras/nodes_textgen.py
+++ b/comfy_extras/nodes_textgen.py
@@ -35,6 +35,7 @@ class TextGenerate(io.ComfyNode):
                 io.Int.Input("max_length", default=256, min=1, max=2048),
                 io.DynamicCombo.Input("sampling_mode", options=sampling_options, display_name="Sampling Mode"),
                 io.Boolean.Input("thinking", optional=True, default=False, tooltip="Operate in thinking mode if the model supports it."),
+                io.Boolean.Input("use_default_template", optional=True, default=True, tooltip="Use the built in system prompt/template if the model has one.", advanced=True),
             ],
             outputs=[
                 io.String.Output(display_name="generated_text"),
@@ -41,10 +42,10 @@ class TextGenerate(io.ComfyNode):
             ],
         )
 
     @classmethod
-    def execute(cls, clip, prompt, max_length, sampling_mode, image=None, thinking=False) -> io.NodeOutput:
+    def execute(cls, clip, prompt, max_length, sampling_mode, image=None, thinking=False, use_default_template=True) -> io.NodeOutput:
 
-        tokens = clip.tokenize(prompt, image=image, skip_template=False, min_length=1, thinking=thinking)
+        tokens = clip.tokenize(prompt, image=image, skip_template=not use_default_template, min_length=1, thinking=thinking)
 
         # Get sampling parameters from dynamic combo
         do_sample = sampling_mode.get("sampling_mode") == "on"
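
Note on PATCH 6/7 (not part of the series itself): the fix hinges on two
PyTorch predicates, Tensor.is_inference() and torch.is_inference_mode_enabled().
Below is a minimal standalone sketch of how they interact, assuming only stock
PyTorch (any recent release with inference mode); maybe_clone is a hypothetical
stand-in for the patched branch in comfy/ops.py, not actual ComfyUI API.

import torch

# A tensor created under torch.inference_mode() keeps its "inference"
# flag after the context exits.
with torch.inference_mode():
    t = torch.ones(4)
print(t.is_inference())  # True

def maybe_clone(p: torch.Tensor) -> torch.Tensor:
    # Mirrors the patched guard: clone only when running *outside*
    # inference mode, where wrapping an inference tensor in an
    # nn.Parameter would otherwise error. Inside inference mode the
    # clone is skipped, which is what removes the transient VRAM
    # doubling the commit message describes.
    if (not torch.is_inference_mode_enabled()) and p.is_inference():
        p = p.clone()
    return p

# Outside inference mode: a real copy is made, and the result can be
# registered as a parameter safely (clone is the documented escape
# hatch for reusing inference tensors).
param = torch.nn.Parameter(maybe_clone(t), requires_grad=False)
assert not param.is_inference()

# Inside inference mode: the tensor is passed through untouched,
# so no extra device memory is allocated during model movement.
with torch.inference_mode():
    assert maybe_clone(t) is t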