From 62b73b3a0cf85c376bd8b9e116999136bda1a6f9 Mon Sep 17 00:00:00 2001
From: Alexis ROLLAND
Date: Sat, 4 Oct 2025 21:08:12 +0800
Subject: [PATCH] Output width and height from empty latents

---
 comfy_extras/nodes_chroma_radiance.py | 8 ++++++--
 comfy_extras/nodes_cosmos.py          | 8 ++++++--
 comfy_extras/nodes_lt.py              | 4 +++-
 comfy_extras/nodes_mochi.py           | 4 +++-
 comfy_extras/nodes_sd3.py             | 6 ++++--
 comfy_extras/nodes_stable_cascade.py  | 6 +++++-
 nodes.py                              | 7 ++++---
 7 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/comfy_extras/nodes_chroma_radiance.py b/comfy_extras/nodes_chroma_radiance.py
index 381989818..592c4affb 100644
--- a/comfy_extras/nodes_chroma_radiance.py
+++ b/comfy_extras/nodes_chroma_radiance.py
@@ -19,13 +19,17 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
                 io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                 io.Int.Input(id="batch_size", default=1, min=1, max=4096),
             ],
-            outputs=[io.Latent().Output()],
+            outputs=[
+                io.Latent().Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
+            ],
         )
 
     @classmethod
     def execute(cls, *, width: int, height: int, batch_size: int=1) -> io.NodeOutput:
         latent = torch.zeros((batch_size, 3, height, width), device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples":latent})
+        return io.NodeOutput({"samples":latent}, width, height)
 
 
 class ChromaRadianceOptions(io.ComfyNode):
diff --git a/comfy_extras/nodes_cosmos.py b/comfy_extras/nodes_cosmos.py
index 7dd129d19..591face13 100644
--- a/comfy_extras/nodes_cosmos.py
+++ b/comfy_extras/nodes_cosmos.py
@@ -20,13 +20,17 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
                 io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=8),
                 io.Int.Input("batch_size", default=1, min=1, max=4096),
             ],
-            outputs=[io.Latent.Output()],
+            outputs=[
+                io.Latent.Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
+            ],
         )
 
     @classmethod
     def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 8) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples": latent})
+        return io.NodeOutput({"samples": latent}, width, height)
 
 
 def vae_encode_with_padding(vae, image, width, height, length, padding=0):
diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py
index b51d15804..32591f119 100644
--- a/comfy_extras/nodes_lt.py
+++ b/comfy_extras/nodes_lt.py
@@ -26,13 +26,15 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
             ],
             outputs=[
                 io.Latent.Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
             ],
         )
 
     @classmethod
     def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32], device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples": latent})
+        return io.NodeOutput({"samples": latent}, width, height)
 
 
 class LTXVImgToVideo(io.ComfyNode):
diff --git a/comfy_extras/nodes_mochi.py b/comfy_extras/nodes_mochi.py
index d750194fc..bb8c14b1b 100644
--- a/comfy_extras/nodes_mochi.py
+++ b/comfy_extras/nodes_mochi.py
@@ -19,13 +19,15 @@ class EmptyMochiLatentVideo(io.ComfyNode):
             ],
             outputs=[
                 io.Latent.Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
             ],
         )
 
     @classmethod
     def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples": latent})
+        return io.NodeOutput({"samples": latent}, width, height)
 
 
 class MochiExtension(ComfyExtension):
diff --git a/comfy_extras/nodes_sd3.py b/comfy_extras/nodes_sd3.py
index d75b29e60..860e0f981 100644
--- a/comfy_extras/nodes_sd3.py
+++ b/comfy_extras/nodes_sd3.py
@@ -35,14 +35,16 @@ class EmptySD3LatentImage:
         return {"required": { "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                               "height": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                               "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
-    RETURN_TYPES = ("LATENT",)
+    RETURN_TYPES = ("LATENT", "INT", "INT")
+    RETURN_NAMES = ("LATENT", "width", "height")
+    OUTPUT_TOOLTIPS = ("The empty latent image batch.", "The width of the latent images.", "The height of the latent images.")
     FUNCTION = "generate"
 
     CATEGORY = "latent/sd3"
 
     def generate(self, width, height, batch_size=1):
         latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=self.device)
-        return ({"samples":latent}, )
+        return ({"samples":latent}, width, height)
 
 
 class CLIPTextEncodeSD3:
diff --git a/comfy_extras/nodes_stable_cascade.py b/comfy_extras/nodes_stable_cascade.py
index 04c0b366a..813040ae6 100644
--- a/comfy_extras/nodes_stable_cascade.py
+++ b/comfy_extras/nodes_stable_cascade.py
@@ -39,6 +39,8 @@ class StableCascade_EmptyLatentImage(io.ComfyNode):
             outputs=[
                 io.Latent.Output(display_name="stage_c"),
                 io.Latent.Output(display_name="stage_b"),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
             ],
         )
 
@@ -50,7 +52,9 @@ class StableCascade_EmptyLatentImage(io.ComfyNode):
             "samples": c_latent,
         }, {
             "samples": b_latent,
-        })
+        },
+        width,
+        height)
 
 
 class StableCascade_StageC_VAEEncode(io.ComfyNode):
diff --git a/nodes.py b/nodes.py
index 88d712993..efbd338a7 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1174,8 +1174,9 @@ class EmptyLatentImage:
                 "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."})
             }
         }
-    RETURN_TYPES = ("LATENT",)
-    OUTPUT_TOOLTIPS = ("The empty latent image batch.",)
+    RETURN_TYPES = ("LATENT", "INT", "INT")
+    RETURN_NAMES = ("LATENT", "width", "height")
+    OUTPUT_TOOLTIPS = ("The empty latent image batch.", "The width of the latent images.", "The height of the latent images.")
     FUNCTION = "generate"
 
     CATEGORY = "latent"
@@ -1183,7 +1184,7 @@ class EmptyLatentImage:
 
     def generate(self, width, height, batch_size=1):
         latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
-        return ({"samples":latent}, )
+        return ({"samples":latent}, width, height)
 
 
 class LatentFromBatch:
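
As a quick sanity check of the new return signature, the patched classic-API node can be exercised directly from Python. This is a minimal sketch, assuming a ComfyUI checkout where nodes.py is importable; the class and method names come from the patch above, while the concrete sizes are illustrative:

    import torch  # already a ComfyUI dependency
    from nodes import EmptyLatentImage

    node = EmptyLatentImage()
    # generate() now returns three values instead of one:
    # the latent dict plus the width and height that were passed in.
    latent, width, height = node.generate(width=512, height=512, batch_size=1)

    assert latent["samples"].shape == torch.Size([1, 4, 64, 64])  # SD latents are width // 8 by height // 8
    assert (width, height) == (512, 512)

In a workflow graph, the same two INT outputs let the pixel-space dimensions feed nodes that need them (for example an upscale or crop node) without re-entering the numbers by hand; the V3-schema nodes in the patch expose them identically via the added io.Int.Output entries.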