mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-16 08:22:36 +08:00
Output width and height from empty latents
This commit is contained in:
parent
7e781171f9
commit
62b73b3a0c
@ -19,13 +19,17 @@ class EmptyChromaRadianceLatentImage(io.ComfyNode):
|
||||
io.Int.Input(id="height", default=1024, min=16, max=nodes.MAX_RESOLUTION, step=16),
|
||||
io.Int.Input(id="batch_size", default=1, min=1, max=4096),
|
||||
],
|
||||
outputs=[io.Latent().Output()],
|
||||
outputs=[
|
||||
io.Latent().Output(),
|
||||
io.Int.Output(display_name="width"),
|
||||
io.Int.Output(display_name="height"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
def execute(cls, *, width: int, height: int, batch_size: int = 1) -> io.NodeOutput:
    """Create an empty Chroma Radiance latent batch.

    Args:
        width: Latent image width in pixels.
        height: Latent image height in pixels.
        batch_size: Number of latents in the batch (defaults to 1).

    Returns:
        A NodeOutput containing the zero-filled latent dict plus the
        width and height as extra INT outputs (matching the node's
        declared "width"/"height" outputs) so downstream nodes can
        reuse the dimensions without re-entering them.
    """
    # NOTE(review): Chroma Radiance latents appear to be full-resolution
    # with 3 channels (no spatial downscale here) — confirm against the VAE.
    latent = torch.zeros(
        (batch_size, 3, height, width),
        device=comfy.model_management.intermediate_device(),
    )
    # Fixed: the diffed block contained two consecutive returns (the first
    # made the second unreachable). Keep the single return that also
    # forwards width/height, per the declared outputs.
    return io.NodeOutput({"samples": latent}, width, height)
|
||||
|
||||
|
||||
class ChromaRadianceOptions(io.ComfyNode):
|
||||
|
||||
@ -20,13 +20,17 @@ class EmptyCosmosLatentVideo(io.ComfyNode):
|
||||
io.Int.Input("length", default=121, min=1, max=nodes.MAX_RESOLUTION, step=8),
|
||||
io.Int.Input("batch_size", default=1, min=1, max=4096),
|
||||
],
|
||||
outputs=[io.Latent.Output()],
|
||||
outputs=[
|
||||
io.Latent.Output(),
|
||||
io.Int.Output(display_name="width"),
|
||||
io.Int.Output(display_name="height"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
    """Create an empty Cosmos video latent batch.

    Args:
        width: Video width in pixels.
        height: Video height in pixels.
        length: Number of video frames.
        batch_size: Number of latents in the batch (defaults to 1).

    Returns:
        A NodeOutput with the zero-filled latent dict plus width and
        height as extra INT outputs (matching the node's declared
        "width"/"height" outputs).
    """
    # 16 latent channels; 8x spatial downscale; 8x temporal compression
    # with one extra frame for the initial frame ((length-1)//8 + 1).
    latent = torch.zeros(
        [batch_size, 16, ((length - 1) // 8) + 1, height // 8, width // 8],
        device=comfy.model_management.intermediate_device(),
    )
    # Fixed: removed the duplicate (unreachable) return left over from the
    # diff; the surviving return forwards width/height as declared.
    return io.NodeOutput({"samples": latent}, width, height)
|
||||
|
||||
|
||||
def vae_encode_with_padding(vae, image, width, height, length, padding=0):
|
||||
|
||||
@ -26,13 +26,15 @@ class EmptyLTXVLatentVideo(io.ComfyNode):
|
||||
],
|
||||
outputs=[
|
||||
io.Latent.Output(),
|
||||
io.Int.Output(display_name="width"),
|
||||
io.Int.Output(display_name="height"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
    """Create an empty LTXV video latent batch.

    Args:
        width: Video width in pixels.
        height: Video height in pixels.
        length: Number of video frames.
        batch_size: Number of latents in the batch (defaults to 1).

    Returns:
        A NodeOutput with the zero-filled latent dict plus width and
        height as extra INT outputs (matching the node's declared
        "width"/"height" outputs).
    """
    # 128 latent channels; 32x spatial downscale; 8x temporal compression
    # with one extra frame for the initial frame ((length-1)//8 + 1).
    latent = torch.zeros(
        [batch_size, 128, ((length - 1) // 8) + 1, height // 32, width // 32],
        device=comfy.model_management.intermediate_device(),
    )
    # Fixed: removed the duplicate (unreachable) return left over from the
    # diff; the surviving return forwards width/height as declared.
    return io.NodeOutput({"samples": latent}, width, height)
|
||||
|
||||
|
||||
class LTXVImgToVideo(io.ComfyNode):
|
||||
|
||||
@ -19,13 +19,15 @@ class EmptyMochiLatentVideo(io.ComfyNode):
|
||||
],
|
||||
outputs=[
|
||||
io.Latent.Output(),
|
||||
io.Int.Output(display_name="width"),
|
||||
io.Int.Output(display_name="height"),
|
||||
],
|
||||
)
|
||||
|
||||
@classmethod
def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
    """Create an empty Mochi video latent batch.

    Args:
        width: Video width in pixels.
        height: Video height in pixels.
        length: Number of video frames.
        batch_size: Number of latents in the batch (defaults to 1).

    Returns:
        A NodeOutput with the zero-filled latent dict plus width and
        height as extra INT outputs (matching the node's declared
        "width"/"height" outputs).
    """
    # 12 latent channels; 8x spatial downscale; 6x temporal compression
    # with one extra frame for the initial frame ((length-1)//6 + 1).
    latent = torch.zeros(
        [batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8],
        device=comfy.model_management.intermediate_device(),
    )
    # Fixed: removed the duplicate (unreachable) return left over from the
    # diff; the surviving return forwards width/height as declared.
    return io.NodeOutput({"samples": latent}, width, height)
|
||||
|
||||
|
||||
class MochiExtension(ComfyExtension):
|
||||
|
||||
@ -35,14 +35,16 @@ class EmptySD3LatentImage:
|
||||
return {"required": { "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
|
||||
"height": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
|
||||
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
|
||||
RETURN_TYPES = ("LATENT",)
|
||||
RETURN_TYPES = ("LATENT", "INT", "INT")
|
||||
RETURN_NAMES = ("LATENT", "width", "height")
|
||||
OUTPUT_TOOLTIPS = ("The empty latent image batch.", "The width of the latent images.", "The height of the latent images.")
|
||||
FUNCTION = "generate"
|
||||
|
||||
CATEGORY = "latent/sd3"
|
||||
|
||||
def generate(self, width, height, batch_size=1):
    """Create an empty SD3 latent image batch.

    Args:
        width: Image width in pixels.
        height: Image height in pixels.
        batch_size: Number of latents in the batch (defaults to 1).

    Returns:
        A 3-tuple of (latent dict, width, height), matching the class's
        RETURN_TYPES ("LATENT", "INT", "INT") so the dimensions are
        exposed as node outputs alongside the latent.
    """
    # SD3 latents use 16 channels at an 8x spatial downscale.
    latent = torch.zeros(
        [batch_size, 16, height // 8, width // 8],
        device=self.device,
    )
    # Fixed: removed the duplicate (unreachable) return left over from the
    # diff; the surviving return forwards width/height per RETURN_TYPES.
    return ({"samples": latent}, width, height)
|
||||
|
||||
|
||||
class CLIPTextEncodeSD3:
|
||||
|
||||
@ -39,6 +39,8 @@ class StableCascade_EmptyLatentImage(io.ComfyNode):
|
||||
outputs=[
|
||||
io.Latent.Output(display_name="stage_c"),
|
||||
io.Latent.Output(display_name="stage_b"),
|
||||
io.Int.Output(display_name="width"),
|
||||
io.Int.Output(display_name="height"),
|
||||
],
|
||||
)
|
||||
|
||||
@ -50,7 +52,9 @@ class StableCascade_EmptyLatentImage(io.ComfyNode):
|
||||
"samples": c_latent,
|
||||
}, {
|
||||
"samples": b_latent,
|
||||
})
|
||||
},
|
||||
width,
|
||||
height)
|
||||
|
||||
|
||||
class StableCascade_StageC_VAEEncode(io.ComfyNode):
|
||||
|
||||
7
nodes.py
7
nodes.py
@ -1174,8 +1174,9 @@ class EmptyLatentImage:
|
||||
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch."})
|
||||
}
|
||||
}
|
||||
RETURN_TYPES = ("LATENT",)
|
||||
OUTPUT_TOOLTIPS = ("The empty latent image batch.",)
|
||||
RETURN_TYPES = ("LATENT", "INT", "INT")
|
||||
RETURN_NAMES = ("LATENT", "width", "height")
|
||||
OUTPUT_TOOLTIPS = ("The empty latent image batch.", "The width of the latent images.", "The height of the latent images.")
|
||||
FUNCTION = "generate"
|
||||
|
||||
CATEGORY = "latent"
|
||||
@ -1183,7 +1184,7 @@ class EmptyLatentImage:
|
||||
|
||||
def generate(self, width, height, batch_size=1):
    """Create an empty latent image batch.

    Args:
        width: Image width in pixels.
        height: Image height in pixels.
        batch_size: Number of latents in the batch (defaults to 1).

    Returns:
        A 3-tuple of (latent dict, width, height), matching the class's
        RETURN_TYPES ("LATENT", "INT", "INT") / RETURN_NAMES
        ("LATENT", "width", "height") so the dimensions are exposed as
        node outputs alongside the latent.
    """
    # Classic SD latents use 4 channels at an 8x spatial downscale.
    latent = torch.zeros(
        [batch_size, 4, height // 8, width // 8],
        device=self.device,
    )
    # Fixed: removed the duplicate (unreachable) return left over from the
    # diff; the surviving return forwards width/height per RETURN_TYPES.
    return ({"samples": latent}, width, height)
|
||||
|
||||
|
||||
class LatentFromBatch:
|
||||
|
||||
Loading…
Reference in New Issue
Block a user