Output width and height from empty latents

Alexis ROLLAND 2025-10-04 21:08:12 +08:00
parent 7e781171f9
commit 62b73b3a0c
7 changed files with 31 additions and 12 deletions

View File

@@ -19,13 +19,17 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
                 io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                 io.Int.Input(id="batch_size", default=1, min=1, max=4096),
             ],
-            outputs=[io.Latent().Output()],
+            outputs=[
+                io.Latent().Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
+            ],
         )

     @classmethod
     def execute(cls, *, width: int, height: int, batch_size: int=1) -> io.NodeOutput:
         latent = torch.zeros((batch_size, 3, height, width), device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples":latent})
+        return io.NodeOutput({"samples":latent}, width, height)


 class ChromaRadianceOptions(io.ComfyNode):
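
All five v3-schema nodes in this commit change the same way: two io.Int.Output entries are appended to the schema's outputs list, and the matching values are passed positionally to io.NodeOutput, which pairs them with the declared outputs in order. A minimal sketch of that pattern, assuming the comfy_api.latest io API used in the files above (the node class and id here are hypothetical, not part of the commit):

    import torch
    import comfy.model_management
    import nodes
    from comfy_api.latest import io

    class EmptyExampleLatentImage(io.ComfyNode):
        @classmethod
        def define_schema(cls):
            return io.Schema(
                node_id="EmptyExampleLatentImage",  # hypothetical id
                category="latent",
                inputs=[
                    io.Int.Input("width", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                    io.Int.Input("height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
                    io.Int.Input("batch_size", default=1, min=1, max=4096),
                ],
                outputs=[
                    io.Latent.Output(),
                    io.Int.Output(display_name="width"),   # new output slot
                    io.Int.Output(display_name="height"),  # new output slot
                ],
            )

        @classmethod
        def execute(cls, width, height, batch_size=1) -> io.NodeOutput:
            latent = torch.zeros([batch_size, 4, height // 8, width // 8],
                                 device=comfy.model_management.intermediate_device())
            # Positional values map onto the declared outputs in order.
            return io.NodeOutput({"samples": latent}, width, height)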

View File

@@ -20,13 +20,17 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
                 io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=8),
                 io.Int.Input("batch_size", default=1, min=1, max=4096),
             ],
-            outputs=[io.Latent.Output()],
+            outputs=[
+                io.Latent.Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
+            ],
         )

     @classmethod
     def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 16, ((length - 1) // 8) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples": latent})
+        return io.NodeOutput({"samples": latent}, width, height)


 def vae_encode_with_padding(vae, image, width, height, length, padding=0):
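
The video nodes derive the latent's temporal size as ((length - 1) // 8) + 1 (Mochi below uses a divisor of 6), so a length of 1 still produces one latent frame and the Cosmos default of 121 maps to 16. A quick standalone check of that arithmetic:

    def latent_frames(length: int, divisor: int = 8) -> int:
        # Temporal size formula used by the empty video-latent nodes.
        return ((length - 1) // divisor) + 1

    assert latent_frames(1) == 1        # a single frame -> one latent frame
    assert latent_frames(121) == 16     # Cosmos default length
    assert latent_frames(121, 6) == 21  # with the Mochi-style divisor of 6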

View File

@@ -26,13 +26,15 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
             ],
             outputs=[
                 io.Latent.Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
             ],
         )

     @classmethod
     def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32], device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples": latent})
+        return io.NodeOutput({"samples": latent}, width, height)


 class LTXVImgToVideo(io.ComfyNode):

View File

@@ -19,13 +19,15 @@ class EmptyMochiLatentVideo(io.ComfyNode):
             ],
             outputs=[
                 io.Latent.Output(),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
             ],
         )

     @classmethod
     def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
-        return io.NodeOutput({"samples": latent})
+        return io.NodeOutput({"samples": latent}, width, height)


 class MochiExtension(ComfyExtension):

View File

@@ -35,14 +35,16 @@ class EmptySD3LatentImage:
         return {"required": { "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                               "height": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
                               "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
-    RETURN_TYPES = ("LATENT",)
+    RETURN_TYPES = ("LATENT", "INT", "INT")
+    RETURN_NAMES = ("LATENT", "width", "height")
+    OUTPUT_TOOLTIPS = ("The empty latent image batch.", "The width of the latent images.", "The height of the latent images.")
     FUNCTION = "generate"

     CATEGORY = "latent/sd3"

     def generate(self, width, height, batch_size=1):
         latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=self.device)
-        return ({"samples":latent}, )
+        return ({"samples":latent}, width, height)


 class CLIPTextEncodeSD3:
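
EmptySD3LatentImage (and EmptyLatentImage in the last file below) still uses the legacy class-attribute node API, where RETURN_TYPES, RETURN_NAMES, and OUTPUT_TOOLTIPS describe the output sockets index by index and the method named by FUNCTION must return a tuple of the same arity. A condensed sketch of that contract (the class name is illustrative, and self.device from the original class is replaced by a plain CPU tensor):

    import torch

    class EmptyExampleLatent:  # illustrative stand-in, not part of the commit
        @classmethod
        def INPUT_TYPES(cls):
            return {"required": {
                "width": ("INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}),
                "height": ("INT", {"default": 1024, "min": 16, "max": 16384, "step": 16}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
            }}

        # Index i of each tuple describes output socket i.
        RETURN_TYPES = ("LATENT", "INT", "INT")
        RETURN_NAMES = ("LATENT", "width", "height")
        OUTPUT_TOOLTIPS = ("The empty latent image batch.",
                           "The width of the latent images.",
                           "The height of the latent images.")
        FUNCTION = "generate"
        CATEGORY = "latent"

        def generate(self, width, height, batch_size=1):
            latent = torch.zeros([batch_size, 16, height // 8, width // 8])
            # One tuple entry per RETURN_TYPES slot, in the same order.
            return ({"samples": latent}, width, height)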

View File

@@ -39,6 +39,8 @@ class StableCascade_EmptyLatentImage(io.ComfyNode):
             outputs=[
                 io.Latent.Output(display_name="stage_c"),
                 io.Latent.Output(display_name="stage_b"),
+                io.Int.Output(display_name="width"),
+                io.Int.Output(display_name="height"),
             ],
         )
@@ -50,7 +52,9 @@ class StableCascade_EmptyLatentImage(io.ComfyNode):
             "samples": c_latent,
         }, {
             "samples": b_latent,
-        })
+        },
+        width,
+        height)


 class StableCascade_StageC_VAEEncode(io.ComfyNode):

View File

@@ -1174,8 +1174,9 @@ class EmptyLatentImage:
                 "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."})
             }
         }
-    RETURN_TYPES = ("LATENT",)
-    OUTPUT_TOOLTIPS = ("The empty latent image batch.",)
+    RETURN_TYPES = ("LATENT", "INT", "INT")
+    RETURN_NAMES = ("LATENT", "width", "height")
+    OUTPUT_TOOLTIPS = ("The empty latent image batch.", "The width of the latent images.", "The height of the latent images.")
     FUNCTION = "generate"

     CATEGORY = "latent"
@@ -1183,7 +1184,7 @@ class EmptyLatentImage:
     def generate(self, width, height, batch_size=1):
         latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
-        return ({"samples":latent}, )
+        return ({"samples":latent}, width, height)


 class LatentFromBatch:
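
Note that in every node touched here, the new outputs carry the pixel-space values the user entered, not the latent tensor's spatial size; for the SD and SD3 image latents the tensor is 8x smaller per spatial dimension, so downstream nodes can take the resolution directly instead of re-entering or recomputing it. A standalone illustration:

    import torch

    width, height, batch_size = 1024, 768, 1
    latent = torch.zeros([batch_size, 4, height // 8, width // 8])

    # The node now returns (latent, 1024, 768); the tensor itself is
    # 8x smaller in each spatial dimension.
    assert latent.shape[-1] == width // 8    # 128
    assert latent.shape[-2] == height // 8   # 96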