From f72c6616b2e91e4021591895192cef8b9d4d1c75 Mon Sep 17 00:00:00 2001
From: chaObserv <154517000+chaObserv@users.noreply.github.com>
Date: Thu, 16 Oct 2025 06:12:25 +0800
Subject: [PATCH 1/5] Add TemporalScoreRescaling node (#10351)

* Add TemporalScoreRescaling node

* Mention image generation in tsr_k's tooltip
---
 comfy_extras/nodes_eps.py | 95 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 95 insertions(+)

diff --git a/comfy_extras/nodes_eps.py b/comfy_extras/nodes_eps.py
index 7852d85e5..4d8061741 100644
--- a/comfy_extras/nodes_eps.py
+++ b/comfy_extras/nodes_eps.py
@@ -1,5 +1,7 @@
+import torch
 from typing_extensions import override
 
+from comfy.k_diffusion.sampling import sigma_to_half_log_snr
 from comfy_api.latest import ComfyExtension, io
 
 
@@ -63,12 +65,105 @@ class EpsilonScaling(io.ComfyNode):
         return io.NodeOutput(model_clone)
 
 
+def compute_tsr_rescaling_factor(
+    snr: torch.Tensor, tsr_k: float, tsr_variance: float
+) -> torch.Tensor:
+    """Compute the score rescaling ratio used in Temporal Score Rescaling.
+
+    See equation (6) in https://arxiv.org/pdf/2510.01184v1.
+    """
+    posinf_mask = torch.isposinf(snr)
+    rescaling_factor = (snr * tsr_variance + 1) / (snr * tsr_variance / tsr_k + 1)
+    return torch.where(posinf_mask, tsr_k, rescaling_factor)  # when snr → inf, r = tsr_k
+
+
+class TemporalScoreRescaling(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TemporalScoreRescaling",
+            display_name="TSR - Temporal Score Rescaling",
+            category="model_patches/unet",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input(
+                    "tsr_k",
+                    tooltip=(
+                        "Controls the rescaling strength.\n"
+                        "Lower k produces more detailed results; higher k produces smoother results in image generation. Setting k = 1 disables rescaling."
+                    ),
+                    default=0.95,
+                    min=0.01,
+                    max=100.0,
+                    step=0.001,
+                    display_mode=io.NumberDisplay.number,
+                ),
+                io.Float.Input(
+                    "tsr_sigma",
+                    tooltip=(
+                        "Controls how early the rescaling takes effect.\n"
+                        "Larger values make the rescaling take effect earlier."
+                    ),
+                    default=1.0,
+                    min=0.01,
+                    max=100.0,
+                    step=0.001,
+                    display_mode=io.NumberDisplay.number,
+                ),
+            ],
+            outputs=[
+                io.Model.Output(
+                    display_name="patched_model",
+                ),
+            ],
+            description=(
+                "[Post-CFG Function]\n"
+                "TSR - Temporal Score Rescaling (2510.01184)\n\n"
+                "Rescales the model's score or noise to steer sampling diversity.\n"
+            ),
+        )
+
+    @classmethod
+    def execute(cls, model, tsr_k, tsr_sigma) -> io.NodeOutput:
+        tsr_variance = tsr_sigma**2
+
+        def temporal_score_rescaling(args):
+            denoised = args["denoised"]
+            x = args["input"]
+            sigma = args["sigma"]
+            curr_model = args["model"]
+
+            # No rescaling (r = 1) or no noise
+            if tsr_k == 1 or sigma == 0:
+                return denoised
+
+            model_sampling = curr_model.current_patcher.get_model_object("model_sampling")
+            half_log_snr = sigma_to_half_log_snr(sigma, model_sampling)
+            snr = (2 * half_log_snr).exp()
+
+            # No rescaling needed (r = 1)
+            if snr == 0:
+                return denoised
+
+            rescaling_r = compute_tsr_rescaling_factor(snr, tsr_k, tsr_variance)
+
+            # Derived from scaled_denoised = (x - r * sigma * noise) / alpha
+            alpha = sigma * half_log_snr.exp()
+            return torch.lerp(x / alpha, denoised, rescaling_r)
+
+        m = model.clone()
+        m.set_model_sampler_post_cfg_function(temporal_score_rescaling)
+        return io.NodeOutput(m)
+
+
 class EpsilonScalingExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[io.ComfyNode]]:
         return [
             EpsilonScaling,
+            TemporalScoreRescaling,
         ]
 
+
 async def comfy_entrypoint() -> EpsilonScalingExtension:
     return EpsilonScalingExtension()
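As a quick sanity check on the factor this patch introduces (not part of the patch itself), the following minimal sketch evaluates equation (6) on a few sample SNR values; the helper name `tsr_factor` and the test inputs are only illustrative. It shows the two limits the node relies on: r stays near 1 as SNR approaches 0 and approaches `tsr_k` as SNR grows, which is exactly the case the `torch.where` branch handles.

```python
# Illustration only: limiting behaviour of the rescaling ratio from
# compute_tsr_rescaling_factor / equation (6) of 2510.01184.
import torch


def tsr_factor(snr: torch.Tensor, tsr_k: float, tsr_sigma: float) -> torch.Tensor:
    tsr_variance = tsr_sigma ** 2
    r = (snr * tsr_variance + 1) / (snr * tsr_variance / tsr_k + 1)
    # Same special case as the node: when SNR is +inf the ratio is exactly tsr_k.
    return torch.where(torch.isposinf(snr), torch.full_like(snr, tsr_k), r)


snr = torch.tensor([0.0, 0.1, 1.0, 10.0, 1000.0, float("inf")])
print(tsr_factor(snr, tsr_k=0.95, tsr_sigma=1.0))
# ~ tensor([1.0000, 0.9952, 0.9744, 0.9543, 0.9500, 0.9500])
# r -> 1 as SNR -> 0 (no rescaling), r -> tsr_k as SNR -> inf.
```

In the post-CFG function this ratio is then used as the `torch.lerp` weight between `x / alpha` and `denoised`, so r = 1 leaves the prediction untouched and smaller r pulls it toward the rescaled input.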
From 74b7f0b04ba19926286518b0a0179290b79bfae0 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Thu, 16 Oct 2025 01:41:45 +0300
Subject: [PATCH 2/5] feat(api-nodes): add Veo3.1 model (#10357)

---
 comfy_api_nodes/nodes_veo2.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/comfy_api_nodes/nodes_veo2.py b/comfy_api_nodes/nodes_veo2.py
index 4588a7991..4ab5c5186 100644
--- a/comfy_api_nodes/nodes_veo2.py
+++ b/comfy_api_nodes/nodes_veo2.py
@@ -27,6 +27,13 @@ from comfy_api_nodes.apinode_utils import (
 )
 
 AVERAGE_DURATION_VIDEO_GEN = 32
+MODELS_MAP = {
+    "veo-2.0-generate-001": "veo-2.0-generate-001",
+    "veo-3.1-generate": "veo-3.1-generate-preview",
+    "veo-3.1-fast-generate": "veo-3.1-fast-generate-preview",
+    "veo-3.0-generate-001": "veo-3.0-generate-001",
+    "veo-3.0-fast-generate-001": "veo-3.0-fast-generate-001",
+}
 
 def convert_image_to_base64(image: torch.Tensor):
     if image is None:
@@ -158,6 +165,7 @@ class VeoVideoGenerationNode(IO.ComfyNode):
         model="veo-2.0-generate-001",
         generate_audio=False,
     ):
+        model = MODELS_MAP[model]
         # Prepare the instances for the request
         instances = []
 
@@ -385,7 +393,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
             ),
             IO.Combo.Input(
                 "model",
-                options=["veo-3.0-generate-001", "veo-3.0-fast-generate-001"],
+                options=list(MODELS_MAP.keys()),
                 default="veo-3.0-generate-001",
                 tooltip="Veo 3 model to use for video generation",
                 optional=True,
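A small illustration (not from the patch) of what the new `MODELS_MAP` buys: the Combo options keep short, stable names while the API request carries the full preview model id, so the suffix lives in one place. The `resolve` helper below is hypothetical; the node itself simply does `model = MODELS_MAP[model]` at the top of `execute()`.

```python
# Hypothetical helper for illustration; the dict literal matches the patch.
MODELS_MAP = {
    "veo-2.0-generate-001": "veo-2.0-generate-001",
    "veo-3.1-generate": "veo-3.1-generate-preview",
    "veo-3.1-fast-generate": "veo-3.1-fast-generate-preview",
    "veo-3.0-generate-001": "veo-3.0-generate-001",
    "veo-3.0-fast-generate-001": "veo-3.0-fast-generate-001",
}


def resolve(option: str) -> str:
    # Mirrors `model = MODELS_MAP[model]`; a KeyError here means the Combo
    # option list and the map have drifted apart.
    return MODELS_MAP[option]


assert resolve("veo-3.1-generate") == "veo-3.1-generate-preview"
assert resolve("veo-3.0-generate-001") == "veo-3.0-generate-001"
```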
From 6b035bfce25b5336ed2a39c72972a8a36a80f9bd Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 15 Oct 2025 15:48:12 -0700
Subject: [PATCH 3/5] Latest pytorch stable is cu130 (#10361)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index db1fdaf3c..b0731db33 100644
--- a/README.md
+++ b/README.md
@@ -255,7 +255,7 @@ This is the command to install the Pytorch xpu nightly which might have some per
 
 Nvidia users should install stable pytorch using this command:
 
-```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu129```
+```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu130```
 
 This is the command to install pytorch nightly instead which might have performance improvements.
 

From 493b81e48f4067da95e4cee36d42a3516338da79 Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Wed, 15 Oct 2025 16:47:26 -0700
Subject: [PATCH 4/5] Fix order of inputs in nested merge_nested_dicts (#10362)

---
 comfy/patcher_extension.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/patcher_extension.py b/comfy/patcher_extension.py
index 46cc7b2a8..5ee4d5ee5 100644
--- a/comfy/patcher_extension.py
+++ b/comfy/patcher_extension.py
@@ -150,7 +150,7 @@ def merge_nested_dicts(dict1: dict, dict2: dict, copy_dict1=True):
     for key, value in dict2.items():
         if isinstance(value, dict):
             curr_value = merged_dict.setdefault(key, {})
-            merged_dict[key] = merge_nested_dicts(value, curr_value)
+            merged_dict[key] = merge_nested_dicts(curr_value, value)
         elif isinstance(value, list):
             merged_dict.setdefault(key, []).extend(value)
         else:

From afa8a24fe1f81d447b961fdf41f47f9094d28919 Mon Sep 17 00:00:00 2001
From: Faych <90372299+neverbiasu@users.noreply.github.com>
Date: Thu, 16 Oct 2025 01:16:09 +0100
Subject: [PATCH 5/5] refactor: Replace manual patches merging with merge_nested_dicts (#10360)

---
 comfy/samplers.py | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/comfy/samplers.py b/comfy/samplers.py
index c59e296a1..e7efaf470 100755
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -306,17 +306,10 @@ def _calc_cond_batch(model: BaseModel, conds: list[list[dict]], x_in: torch.Tens
                                                                              copy_dict1=False)
 
         if patches is not None:
-            # TODO: replace with merge_nested_dicts function
-            if "patches" in transformer_options:
-                cur_patches = transformer_options["patches"].copy()
-                for p in patches:
-                    if p in cur_patches:
-                        cur_patches[p] = cur_patches[p] + patches[p]
-                    else:
-                        cur_patches[p] = patches[p]
-                transformer_options["patches"] = cur_patches
-            else:
-                transformer_options["patches"] = patches
+            transformer_options["patches"] = comfy.patcher_extension.merge_nested_dicts(
+                transformer_options.get("patches", {}),
+                patches
+            )
 
         transformer_options["cond_or_uncond"] = cond_or_uncond[:]
         transformer_options["uuids"] = uuids[:]
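To see why PATCH 4 swaps the recursion arguments (and what PATCH 5 then relies on), here is a standalone sketch of `merge_nested_dicts`. The loop body follows the hunk in PATCH 4; the initialisation of `merged_dict` is an assumption, since it sits outside the visible context lines, and the `fixed` flag exists only to contrast the two argument orders.

```python
import copy


def merge_nested_dicts(dict1: dict, dict2: dict, copy_dict1=True, fixed=True):
    # Loop body follows the PATCH 4 hunk; the copy step below is assumed.
    merged_dict = copy.deepcopy(dict1) if copy_dict1 else dict1
    for key, value in dict2.items():
        if isinstance(value, dict):
            curr_value = merged_dict.setdefault(key, {})
            if fixed:
                # Patched order: dict2 keeps overriding dict1 at every depth.
                merged_dict[key] = merge_nested_dicts(curr_value, value, fixed=True)
            else:
                # Old order: precedence silently flips one level down.
                merged_dict[key] = merge_nested_dicts(value, curr_value, fixed=False)
        elif isinstance(value, list):
            merged_dict.setdefault(key, []).extend(value)
        else:
            merged_dict[key] = value
    return merged_dict


base = {"patches": {"attn1_patch": ["a"]}, "sigmas": {"scale": 1.0}}
extra = {"patches": {"attn1_patch": ["b"]}, "sigmas": {"scale": 2.0}}

print(merge_nested_dicts(base, extra, fixed=False))
# {'patches': {'attn1_patch': ['b', 'a']}, 'sigmas': {'scale': 1.0}}  <- dict1 wins, lists reversed
print(merge_nested_dicts(base, extra, fixed=True))
# {'patches': {'attn1_patch': ['a', 'b']}, 'sigmas': {'scale': 2.0}}  <- dict2 wins, as at the top level
```

PATCH 5 then reuses the corrected helper for the per-cond patches merge in `_calc_cond_batch`; as in the old hand-written loop, incoming `patches` lists are appended after any already present in `transformer_options["patches"]`.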