api-nodes: fixed dynamic pricing format; import comfy_io directly (#10336)

Alexander Piskun 2025-10-14 09:55:56 +03:00 committed by GitHub
parent 84867067ea
commit 7a883849ea
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
19 changed files with 1331 additions and 1322 deletions

View File

@@ -114,7 +114,9 @@ if TYPE_CHECKING:
     ComfyAPISync: Type[comfy_api.latest.generated.ComfyAPISyncStub.ComfyAPISyncStub]
 ComfyAPISync = create_sync_class(ComfyAPI_latest)

-comfy_io = io # create the new alias for io
+# create new aliases for io and ui
+IO = io
+UI = ui

 __all__ = [
     "ComfyAPI",
@@ -124,6 +126,7 @@ __all__ = [
     "Types",
     "ComfyExtension",
     "io",
-    "comfy_io",
+    "IO",
     "ui",
+    "UI",
 ]
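
The hunk above replaces the single `comfy_io` alias with two direct aliases, `IO` and `UI`, so API-node modules can import them without renaming at the import site. A minimal sketch of the new style (the node class below is hypothetical; only the import path and alias names come from this hunk):

    from comfy_api.latest import ComfyExtension, IO


    class ExampleNode(IO.ComfyNode):  # hypothetical node, for illustration only
        @classmethod
        def define_schema(cls) -> IO.Schema:
            # Further Schema arguments mirror the real nodes later in this commit.
            return IO.Schema(node_id="ExampleNode", display_name="Example", inputs=[], outputs=[])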

View File

@@ -3,6 +3,7 @@ import aiohttp
 import io
 import logging
 import mimetypes
+import os
 from typing import Optional, Union
 from comfy.utils import common_upscale
 from comfy_api.input_impl import VideoFromFile
@@ -702,3 +703,16 @@ def image_tensor_pair_to_batch(
         "center",
     ).movedim(1, -1)
     return torch.cat((image1, image2), dim=0)
+
+
+def get_size(path_or_object: Union[str, io.BytesIO]) -> int:
+    if isinstance(path_or_object, str):
+        return os.path.getsize(path_or_object)
+    return len(path_or_object.getvalue())
+
+
+def validate_container_format_is_mp4(video: VideoInput) -> None:
+    """Validates video container format is MP4."""
+    container_format = video.get_container_format()
+    if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]:
+        raise ValueError(f"Only MP4 container format supported. Got: {container_format}")

View File

@@ -845,7 +845,7 @@ class PollingOperation(Generic[T, R]):
         if not self.node_id:
             return
         if self.extracted_price is not None:
-            text = f"Price: {self.extracted_price}$\n{text}"
+            text = f"Price: ${self.extracted_price}\n{text}"
         PromptServer.instance.send_progress_text(text, self.node_id)

     def _display_time_progress_on_node(self, time_completed: int | float):
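
This hunk is the "fixed dynamic pricing format" part of the commit title: the currency sign moves in front of the amount, so the progress text on the node now reads `Price: $0.04` rather than `Price: 0.04$`. A one-line check with an illustrative price value:

    extracted_price = 0.04
    print(f"Price: ${extracted_price}")  # -> "Price: $0.04" (was "Price: 0.04$")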

View File

@@ -3,7 +3,7 @@ import io
 from inspect import cleandoc
 from typing import Union, Optional
 from typing_extensions import override
-from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api.latest import ComfyExtension, IO
 from comfy_api_nodes.apis.bfl_api import (
     BFLStatus,
     BFLFluxExpandImageRequest,
@@ -131,7 +131,7 @@ def convert_image_to_base64(image: torch.Tensor):
     return base64.b64encode(img_byte_arr.getvalue()).decode()


-class FluxProUltraImageNode(comfy_io.ComfyNode):
+class FluxProUltraImageNode(IO.ComfyNode):
     """
     Generates images using Flux Pro 1.1 Ultra via api based on prompt and resolution.
     """
@@ -142,25 +142,25 @@ class FluxProUltraImageNode(comfy_io.ComfyNode):
     MAXIMUM_RATIO_STR = "4:1"

     @classmethod
-    def define_schema(cls) -> comfy_io.Schema:
-        return comfy_io.Schema(
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
             node_id="FluxProUltraImageNode",
             display_name="Flux 1.1 [pro] Ultra Image",
             category="api node/image/BFL",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt for the image generation",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_upsampling",
                     default=False,
                     tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
@@ -168,21 +168,21 @@ class FluxProUltraImageNode(comfy_io.ComfyNode):
                     control_after_generate=True,
                     tooltip="The random seed used for creating the noise.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "aspect_ratio",
                     default="16:9",
                     tooltip="Aspect ratio of image; must be between 1:4 and 4:1.",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "raw",
                     default=False,
                     tooltip="When True, generate less processed, more natural-looking images.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "image_prompt",
                     optional=True,
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "image_prompt_strength",
                     default=0.1,
                     min=0.0,
@@ -192,11 +192,11 @@ class FluxProUltraImageNode(comfy_io.ComfyNode):
                     optional=True,
                 ),
             ],
-            outputs=[comfy_io.Image.Output()],
+            outputs=[IO.Image.Output()],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -225,7 +225,7 @@ class FluxProUltraImageNode(comfy_io.ComfyNode):
         seed=0,
         image_prompt=None,
         image_prompt_strength=0.1,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         if image_prompt is None:
             validate_string(prompt, strip_whitespace=False)
         operation = SynchronousOperation(
@@ -262,10 +262,10 @@ class FluxProUltraImageNode(comfy_io.ComfyNode):
             },
         )
         output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
-        return comfy_io.NodeOutput(output_image)
+        return IO.NodeOutput(output_image)


-class FluxKontextProImageNode(comfy_io.ComfyNode):
+class FluxKontextProImageNode(IO.ComfyNode):
     """
     Edits images using Flux.1 Kontext [pro] via api based on prompt and aspect ratio.
     """
@@ -276,25 +276,25 @@ class FluxKontextProImageNode(comfy_io.ComfyNode):
     MAXIMUM_RATIO_STR = "4:1"

     @classmethod
-    def define_schema(cls) -> comfy_io.Schema:
-        return comfy_io.Schema(
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
             node_id=cls.NODE_ID,
             display_name=cls.DISPLAY_NAME,
             category="api node/image/BFL",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt for the image generation - specify what and how to edit.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "aspect_ratio",
                     default="16:9",
                     tooltip="Aspect ratio of image; must be between 1:4 and 4:1.",
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "guidance",
                     default=3.0,
                     min=0.1,
@@ -302,14 +302,14 @@ class FluxKontextProImageNode(comfy_io.ComfyNode):
                     step=0.1,
                     tooltip="Guidance strength for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "steps",
                     default=50,
                     min=1,
                     max=150,
                     tooltip="Number of steps for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=1234,
                     min=0,
@@ -317,21 +317,21 @@ class FluxKontextProImageNode(comfy_io.ComfyNode):
                     control_after_generate=True,
                     tooltip="The random seed used for creating the noise.",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_upsampling",
                     default=False,
                     tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "input_image",
                     optional=True,
                 ),
             ],
-            outputs=[comfy_io.Image.Output()],
+            outputs=[IO.Image.Output()],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -350,7 +350,7 @@ class FluxKontextProImageNode(comfy_io.ComfyNode):
         input_image: Optional[torch.Tensor]=None,
         seed=0,
         prompt_upsampling=False,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         aspect_ratio = validate_aspect_ratio(
             aspect_ratio,
             minimum_ratio=cls.MINIMUM_RATIO,
@@ -386,7 +386,7 @@ class FluxKontextProImageNode(comfy_io.ComfyNode):
             },
         )
         output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
-        return comfy_io.NodeOutput(output_image)
+        return IO.NodeOutput(output_image)


 class FluxKontextMaxImageNode(FluxKontextProImageNode):
@@ -400,45 +400,45 @@ class FluxKontextMaxImageNode(FluxKontextProImageNode):
     DISPLAY_NAME = "Flux.1 Kontext [max] Image"


-class FluxProImageNode(comfy_io.ComfyNode):
+class FluxProImageNode(IO.ComfyNode):
     """
     Generates images synchronously based on prompt and resolution.
     """

     @classmethod
-    def define_schema(cls) -> comfy_io.Schema:
-        return comfy_io.Schema(
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
             node_id="FluxProImageNode",
             display_name="Flux 1.1 [pro] Image",
             category="api node/image/BFL",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt for the image generation",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_upsampling",
                     default=False,
                     tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "width",
                     default=1024,
                     min=256,
                     max=1440,
                     step=32,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "height",
                     default=768,
                     min=256,
                     max=1440,
                     step=32,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
@@ -446,7 +446,7 @@ class FluxProImageNode(comfy_io.ComfyNode):
                     control_after_generate=True,
                     tooltip="The random seed used for creating the noise.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "image_prompt",
                     optional=True,
                 ),
@@ -461,11 +461,11 @@ class FluxProImageNode(comfy_io.ComfyNode):
                 # },
                 # ),
             ],
-            outputs=[comfy_io.Image.Output()],
+            outputs=[IO.Image.Output()],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -480,7 +480,7 @@ class FluxProImageNode(comfy_io.ComfyNode):
         seed=0,
         image_prompt=None,
         # image_prompt_strength=0.1,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         image_prompt = (
             image_prompt
             if image_prompt is None
@@ -508,77 +508,77 @@ class FluxProImageNode(comfy_io.ComfyNode):
             },
         )
         output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
-        return comfy_io.NodeOutput(output_image)
+        return IO.NodeOutput(output_image)


-class FluxProExpandNode(comfy_io.ComfyNode):
+class FluxProExpandNode(IO.ComfyNode):
     """
     Outpaints image based on prompt.
     """

     @classmethod
-    def define_schema(cls) -> comfy_io.Schema:
-        return comfy_io.Schema(
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
             node_id="FluxProExpandNode",
             display_name="Flux.1 Expand Image",
             category="api node/image/BFL",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
-                comfy_io.Image.Input("image"),
-                comfy_io.String.Input(
+                IO.Image.Input("image"),
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt for the image generation",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_upsampling",
                     default=False,
                     tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "top",
                     default=0,
                     min=0,
                     max=2048,
                     tooltip="Number of pixels to expand at the top of the image",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "bottom",
                     default=0,
                     min=0,
                     max=2048,
                     tooltip="Number of pixels to expand at the bottom of the image",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "left",
                     default=0,
                     min=0,
                     max=2048,
                     tooltip="Number of pixels to expand at the left of the image",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "right",
                     default=0,
                     min=0,
                     max=2048,
                     tooltip="Number of pixels to expand at the right of the image",
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "guidance",
                     default=60,
                     min=1.5,
                     max=100,
                     tooltip="Guidance strength for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "steps",
                     default=50,
                     min=15,
                     max=50,
                     tooltip="Number of steps for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
@@ -587,11 +587,11 @@ class FluxProExpandNode(comfy_io.ComfyNode):
                     tooltip="The random seed used for creating the noise.",
                 ),
             ],
-            outputs=[comfy_io.Image.Output()],
+            outputs=[IO.Image.Output()],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -609,7 +609,7 @@ class FluxProExpandNode(comfy_io.ComfyNode):
         steps: int,
         guidance: float,
         seed=0,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         image = convert_image_to_base64(image)

         operation = SynchronousOperation(
@@ -637,51 +637,51 @@ class FluxProExpandNode(comfy_io.ComfyNode):
             },
         )
         output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
-        return comfy_io.NodeOutput(output_image)
+        return IO.NodeOutput(output_image)


-class FluxProFillNode(comfy_io.ComfyNode):
+class FluxProFillNode(IO.ComfyNode):
     """
     Inpaints image based on mask and prompt.
     """

     @classmethod
-    def define_schema(cls) -> comfy_io.Schema:
-        return comfy_io.Schema(
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
             node_id="FluxProFillNode",
             display_name="Flux.1 Fill Image",
             category="api node/image/BFL",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
-                comfy_io.Image.Input("image"),
-                comfy_io.Mask.Input("mask"),
-                comfy_io.String.Input(
+                IO.Image.Input("image"),
+                IO.Mask.Input("mask"),
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt for the image generation",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_upsampling",
                     default=False,
                     tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "guidance",
                     default=60,
                     min=1.5,
                     max=100,
                     tooltip="Guidance strength for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "steps",
                     default=50,
                     min=15,
                     max=50,
                     tooltip="Number of steps for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
@@ -690,11 +690,11 @@ class FluxProFillNode(comfy_io.ComfyNode):
                     tooltip="The random seed used for creating the noise.",
                 ),
             ],
-            outputs=[comfy_io.Image.Output()],
+            outputs=[IO.Image.Output()],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -709,7 +709,7 @@ class FluxProFillNode(comfy_io.ComfyNode):
         steps: int,
         guidance: float,
         seed=0,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         # prepare mask
         mask = resize_mask_to_image(mask, image)
         mask = convert_image_to_base64(convert_mask_to_image(mask))
@@ -738,35 +738,35 @@ class FluxProFillNode(comfy_io.ComfyNode):
             },
         )
         output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
-        return comfy_io.NodeOutput(output_image)
+        return IO.NodeOutput(output_image)


-class FluxProCannyNode(comfy_io.ComfyNode):
+class FluxProCannyNode(IO.ComfyNode):
     """
     Generate image using a control image (canny).
     """

     @classmethod
-    def define_schema(cls) -> comfy_io.Schema:
-        return comfy_io.Schema(
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
             node_id="FluxProCannyNode",
             display_name="Flux.1 Canny Control Image",
             category="api node/image/BFL",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
-                comfy_io.Image.Input("control_image"),
-                comfy_io.String.Input(
+                IO.Image.Input("control_image"),
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt for the image generation",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_upsampling",
                     default=False,
                     tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "canny_low_threshold",
                     default=0.1,
                     min=0.01,
@@ -774,7 +774,7 @@ class FluxProCannyNode(comfy_io.ComfyNode):
                     step=0.01,
                     tooltip="Low threshold for Canny edge detection; ignored if skip_processing is True",
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "canny_high_threshold",
                     default=0.4,
                     min=0.01,
@@ -782,26 +782,26 @@ class FluxProCannyNode(comfy_io.ComfyNode):
                     step=0.01,
                     tooltip="High threshold for Canny edge detection; ignored if skip_processing is True",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "skip_preprocessing",
                     default=False,
                     tooltip="Whether to skip preprocessing; set to True if control_image already is canny-fied, False if it is a raw image.",
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "guidance",
                     default=30,
                     min=1,
                     max=100,
                     tooltip="Guidance strength for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "steps",
                     default=50,
                     min=15,
                     max=50,
                     tooltip="Number of steps for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
@@ -810,11 +810,11 @@ class FluxProCannyNode(comfy_io.ComfyNode):
                     tooltip="The random seed used for creating the noise.",
                 ),
             ],
-            outputs=[comfy_io.Image.Output()],
+            outputs=[IO.Image.Output()],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -831,7 +831,7 @@ class FluxProCannyNode(comfy_io.ComfyNode):
         steps: int,
         guidance: float,
         seed=0,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         control_image = convert_image_to_base64(control_image[:, :, :, :3])
         preprocessed_image = None

@@ -872,54 +872,54 @@ class FluxProCannyNode(comfy_io.ComfyNode):
             },
         )
         output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
-        return comfy_io.NodeOutput(output_image)
+        return IO.NodeOutput(output_image)


-class FluxProDepthNode(comfy_io.ComfyNode):
+class FluxProDepthNode(IO.ComfyNode):
     """
     Generate image using a control image (depth).
     """

     @classmethod
-    def define_schema(cls) -> comfy_io.Schema:
-        return comfy_io.Schema(
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
             node_id="FluxProDepthNode",
             display_name="Flux.1 Depth Control Image",
             category="api node/image/BFL",
             description=cleandoc(cls.__doc__ or ""),
             inputs=[
-                comfy_io.Image.Input("control_image"),
-                comfy_io.String.Input(
+                IO.Image.Input("control_image"),
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt for the image generation",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_upsampling",
                     default=False,
                     tooltip="Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation, but results are nondeterministic (same seed will not produce exactly the same result).",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "skip_preprocessing",
                     default=False,
                     tooltip="Whether to skip preprocessing; set to True if control_image already is depth-ified, False if it is a raw image.",
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "guidance",
                     default=15,
                     min=1,
                     max=100,
                     tooltip="Guidance strength for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "steps",
                     default=50,
                     min=15,
                     max=50,
                     tooltip="Number of steps for the image generation process",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
@@ -928,11 +928,11 @@ class FluxProDepthNode(comfy_io.ComfyNode):
                     tooltip="The random seed used for creating the noise.",
                 ),
             ],
-            outputs=[comfy_io.Image.Output()],
+            outputs=[IO.Image.Output()],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -947,7 +947,7 @@ class FluxProDepthNode(comfy_io.ComfyNode):
         steps: int,
         guidance: float,
         seed=0,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         control_image = convert_image_to_base64(control_image[:,:,:,:3])
         preprocessed_image = None

@@ -977,12 +977,12 @@ class FluxProDepthNode(comfy_io.ComfyNode):
             },
         )
         output_image = await handle_bfl_synchronous_operation(operation, node_id=cls.hidden.unique_id)
-        return comfy_io.NodeOutput(output_image)
+        return IO.NodeOutput(output_image)


 class BFLExtension(ComfyExtension):
     @override
-    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             FluxProUltraImageNode,
             # FluxProImageNode,

View File

@@ -7,7 +7,7 @@ from typing_extensions import override
 import torch
 from pydantic import BaseModel, Field

-from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api.latest import ComfyExtension, IO
 from comfy_api_nodes.util.validation_utils import (
     validate_image_aspect_ratio_range,
     get_number_of_images,
@@ -237,33 +237,33 @@ async def poll_until_finished(
     ).execute()


-class ByteDanceImageNode(comfy_io.ComfyNode):
+class ByteDanceImageNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ByteDanceImageNode",
             display_name="ByteDance Image",
             category="api node/image/ByteDance",
             description="Generate images using ByteDance models via api based on prompt",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=Text2ImageModelName,
                     default=Text2ImageModelName.seedream_3,
                     tooltip="Model name",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     tooltip="The text prompt used to generate the image",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "size_preset",
                     options=[label for label, _, _ in RECOMMENDED_PRESETS],
                     tooltip="Pick a recommended size. Select Custom to use the width and height below",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "width",
                     default=1024,
                     min=512,
@@ -271,7 +271,7 @@ class ByteDanceImageNode(comfy_io.ComfyNode):
                     step=64,
                     tooltip="Custom width for image. Value is working only if `size_preset` is set to `Custom`",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "height",
                     default=1024,
                     min=512,
@@ -279,28 +279,28 @@ class ByteDanceImageNode(comfy_io.ComfyNode):
                     step=64,
                     tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation",
                     optional=True,
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "guidance_scale",
                     default=2.5,
                     min=1.0,
                     max=10.0,
                     step=0.01,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     tooltip="Higher value makes the image follow the prompt more closely",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the image",
@@ -308,12 +308,12 @@ class ByteDanceImageNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -329,7 +329,7 @@ class ByteDanceImageNode(comfy_io.ComfyNode):
         seed: int,
         guidance_scale: float,
         watermark: bool,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=1)
         w = h = None
         for label, tw, th in RECOMMENDED_PRESETS:
@@ -367,57 +367,57 @@ class ByteDanceImageNode(comfy_io.ComfyNode):
             request=payload,
             auth_kwargs=auth_kwargs,
         ).execute()
-        return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
+        return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))


-class ByteDanceImageEditNode(comfy_io.ComfyNode):
+class ByteDanceImageEditNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ByteDanceImageEditNode",
             display_name="ByteDance Image Edit",
             category="api node/image/ByteDance",
             description="Edit images using ByteDance models via api based on prompt",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=Image2ImageModelName,
                     default=Image2ImageModelName.seededit_3,
                     tooltip="Model name",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "image",
                     tooltip="The base image to edit",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Instruction to edit image",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation",
                     optional=True,
                 ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                     "guidance_scale",
                     default=5.5,
                     min=1.0,
                     max=10.0,
                     step=0.01,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     tooltip="Higher value makes the image follow the prompt more closely",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the image",
@@ -425,12 +425,12 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -444,7 +444,7 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode):
         seed: int,
         guidance_scale: float,
         watermark: bool,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=1)
         if get_number_of_images(image) != 1:
             raise ValueError("Exactly one input image is required.")
@@ -477,42 +477,42 @@ class ByteDanceImageEditNode(comfy_io.ComfyNode):
             request=payload,
             auth_kwargs=auth_kwargs,
         ).execute()
-        return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
+        return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))


-class ByteDanceSeedreamNode(comfy_io.ComfyNode):
+class ByteDanceSeedreamNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ByteDanceSeedreamNode",
             display_name="ByteDance Seedream 4",
             category="api node/image/ByteDance",
             description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=["seedream-4-0-250828"],
                     tooltip="Model name",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Text prompt for creating or editing an image.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "image",
                     tooltip="Input image(s) for image-to-image generation. "
                     "List of 1-10 images for single or multi-reference generation.",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "size_preset",
                     options=[label for label, _, _ in RECOMMENDED_PRESETS_SEEDREAM_4],
                     tooltip="Pick a recommended size. Select Custom to use the width and height below.",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "width",
                     default=2048,
                     min=1024,
@@ -521,7 +521,7 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode):
                     tooltip="Custom width for image. Value is working only if `size_preset` is set to `Custom`",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "height",
                     default=2048,
                     min=1024,
@@ -530,7 +530,7 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode):
                     tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "sequential_image_generation",
                     options=["disabled", "auto"],
                     tooltip="Group image generation mode. "
@@ -539,35 +539,35 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode):
                     "(e.g., story scenes, character variations).",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "max_images",
                     default=1,
                     min=1,
                     max=15,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     tooltip="Maximum number of images to generate when sequential_image_generation='auto'. "
                     "Total images (input + generated) cannot exceed 15.",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the image.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "fail_on_partial",
                     default=True,
                     tooltip="If enabled, abort execution if any requested images are missing or return an error.",
@@ -575,12 +575,12 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -599,7 +599,7 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode):
         seed: int = 0,
         watermark: bool = True,
         fail_on_partial: bool = True,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=1)
         w = h = None
         for label, tw, th in RECOMMENDED_PRESETS_SEEDREAM_4:
@@ -657,72 +657,72 @@ class ByteDanceSeedreamNode(comfy_io.ComfyNode):
         ).execute()

         if len(response.data) == 1:
-            return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
+            return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
         urls = [str(d["url"]) for d in response.data if isinstance(d, dict) and "url" in d]
         if fail_on_partial and len(urls) < len(response.data):
             raise RuntimeError(f"Only {len(urls)} of {len(response.data)} images were generated before error.")
-        return comfy_io.NodeOutput(torch.cat([await download_url_to_image_tensor(i) for i in urls]))
+        return IO.NodeOutput(torch.cat([await download_url_to_image_tensor(i) for i in urls]))


-class ByteDanceTextToVideoNode(comfy_io.ComfyNode):
+class ByteDanceTextToVideoNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ByteDanceTextToVideoNode",
             display_name="ByteDance Text to Video",
             category="api node/video/ByteDance",
             description="Generate video using ByteDance models via api based on prompt",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=Text2VideoModelName,
                     default=Text2VideoModelName.seedance_1_pro,
                     tooltip="Model name",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     tooltip="The text prompt used to generate the video.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "resolution",
                     options=["480p", "720p", "1080p"],
                     tooltip="The resolution of the output video.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "aspect_ratio",
                     options=["16:9", "4:3", "1:1", "3:4", "9:16", "21:9"],
                     tooltip="The aspect ratio of the output video.",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=3,
                     max=12,
                     step=1,
                     tooltip="The duration of the output video in seconds.",
-                    display_mode=comfy_io.NumberDisplay.slider,
+                    display_mode=IO.NumberDisplay.slider,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "camera_fixed",
                     default=False,
                     tooltip="Specifies whether to fix the camera. The platform appends an instruction "
                     "to fix the camera to your prompt, but does not guarantee the actual effect.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the video.",
@@ -730,12 +730,12 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -751,7 +751,7 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])

@@ -781,69 +781,69 @@ class ByteDanceTextToVideoNode(comfy_io.ComfyNode):
         )


-class ByteDanceImageToVideoNode(comfy_io.ComfyNode):
+class ByteDanceImageToVideoNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ByteDanceImageToVideoNode",
             display_name="ByteDance Image to Video",
             category="api node/video/ByteDance",
             description="Generate video using ByteDance models via api based on image and prompt",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=Image2VideoModelName,
                     default=Image2VideoModelName.seedance_1_pro,
                     tooltip="Model name",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     tooltip="The text prompt used to generate the video.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "image",
                     tooltip="First frame to be used for the video.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "resolution",
                     options=["480p", "720p", "1080p"],
                     tooltip="The resolution of the output video.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "aspect_ratio",
                     options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"],
                     tooltip="The aspect ratio of the output video.",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=3,
                     max=12,
                     step=1,
                     tooltip="The duration of the output video in seconds.",
-                    display_mode=comfy_io.NumberDisplay.slider,
+                    display_mode=IO.NumberDisplay.slider,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "camera_fixed",
                     default=False,
                     tooltip="Specifies whether to fix the camera. The platform appends an instruction "
                     "to fix the camera to your prompt, but does not guarantee the actual effect.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the video.",
@@ -851,12 +851,12 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -873,7 +873,7 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])
         validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000)
@@ -908,73 +908,73 @@ class ByteDanceImageToVideoNode(comfy_io.ComfyNode):
         )


-class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode):
+class ByteDanceFirstLastFrameNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ByteDanceFirstLastFrameNode",
             display_name="ByteDance First-Last-Frame to Video",
             category="api node/video/ByteDance",
             description="Generate video using prompt and first and last frames.",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=[model.value for model in Image2VideoModelName],
                     default=Image2VideoModelName.seedance_1_lite.value,
                     tooltip="Model name",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     tooltip="The text prompt used to generate the video.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "first_frame",
                     tooltip="First frame to be used for the video.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "last_frame",
                     tooltip="Last frame to be used for the video.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "resolution",
                     options=["480p", "720p", "1080p"],
                     tooltip="The resolution of the output video.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "aspect_ratio",
                     options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"],
                     tooltip="The aspect ratio of the output video.",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=3,
                     max=12,
                     step=1,
                     tooltip="The duration of the output video in seconds.",
-                    display_mode=comfy_io.NumberDisplay.slider,
+                    display_mode=IO.NumberDisplay.slider,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "camera_fixed",
                     default=False,
                     tooltip="Specifies whether to fix the camera. The platform appends an instruction "
                     "to fix the camera to your prompt, but does not guarantee the actual effect.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the video.",
@@ -982,12 +982,12 @@ class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -1005,7 +1005,7 @@ class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])
         for i in (first_frame, last_frame):
@@ -1050,62 +1050,62 @@ class ByteDanceFirstLastFrameNode(comfy_io.ComfyNode):
         )


-class ByteDanceImageReferenceNode(comfy_io.ComfyNode):
+class ByteDanceImageReferenceNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ByteDanceImageReferenceNode",
             display_name="ByteDance Reference Images to Video",
             category="api node/video/ByteDance",
             description="Generate video using prompt and reference images.",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=[Image2VideoModelName.seedance_1_lite.value],
                     default=Image2VideoModelName.seedance_1_lite.value,
                     tooltip="Model name",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     tooltip="The text prompt used to generate the video.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "images",
                     tooltip="One to four images.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "resolution",
                     options=["480p", "720p"],
                     tooltip="The resolution of the output video.",
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "aspect_ratio",
                     options=["adaptive", "16:9", "4:3", "1:1", "3:4", "9:16", "21:9"],
                     tooltip="The aspect ratio of the output video.",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=3,
                     max=12,
                     step=1,
                     tooltip="The duration of the output video in seconds.",
-                    display_mode=comfy_io.NumberDisplay.slider,
+                    display_mode=IO.NumberDisplay.slider,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
min=0, min=0,
max=2147483647, max=2147483647,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
control_after_generate=True, control_after_generate=True,
tooltip="Seed to use for generation.", tooltip="Seed to use for generation.",
optional=True, optional=True,
), ),
comfy_io.Boolean.Input( IO.Boolean.Input(
"watermark", "watermark",
default=True, default=True,
tooltip="Whether to add an \"AI generated\" watermark to the video.", tooltip="Whether to add an \"AI generated\" watermark to the video.",
@ -1113,12 +1113,12 @@ class ByteDanceImageReferenceNode(comfy_io.ComfyNode):
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1134,7 +1134,7 @@ class ByteDanceImageReferenceNode(comfy_io.ComfyNode):
duration: int, duration: int,
seed: int, seed: int,
watermark: bool, watermark: bool,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=True, min_length=1) validate_string(prompt, strip_whitespace=True, min_length=1)
raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "watermark"]) raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "watermark"])
for image in images: for image in images:
@ -1180,7 +1180,7 @@ async def process_video_task(
auth_kwargs: dict, auth_kwargs: dict,
node_id: str, node_id: str,
estimated_duration: Optional[int], estimated_duration: Optional[int],
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
initial_response = await SynchronousOperation( initial_response = await SynchronousOperation(
endpoint=ApiEndpoint( endpoint=ApiEndpoint(
path=BYTEPLUS_TASK_ENDPOINT, path=BYTEPLUS_TASK_ENDPOINT,
@ -1197,7 +1197,7 @@ async def process_video_task(
estimated_duration=estimated_duration, estimated_duration=estimated_duration,
node_id=node_id, node_id=node_id,
) )
return comfy_io.NodeOutput(await download_url_to_video_output(get_video_url_from_task_status(response))) return IO.NodeOutput(await download_url_to_video_output(get_video_url_from_task_status(response)))
def raise_if_text_params(prompt: str, text_params: list[str]) -> None: def raise_if_text_params(prompt: str, text_params: list[str]) -> None:
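Only the signature of this new helper survives in the hunk. A plausible body, inferred from the call sites above where prompt text must not smuggle in request parameters such as resolution or seed; illustrative only, not the committed implementation:

def raise_if_text_params(prompt: str, text_params: list[str]) -> None:
    # Seedance-style APIs accept inline "--param value" commands inside the prompt;
    # rejecting them keeps the node inputs the single source of truth. (Assumed.)
    for param in text_params:
        if f"--{param}" in prompt:
            raise ValueError(
                f"Remove '--{param}' from the prompt and use the corresponding node input instead."
            )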
@ -1210,7 +1210,7 @@ def raise_if_text_params(prompt: str, text_params: list[str]) -> None:
class ByteDanceExtension(ComfyExtension): class ByteDanceExtension(ComfyExtension):
@override @override
async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [ return [
ByteDanceImageNode, ByteDanceImageNode,
ByteDanceImageEditNode, ByteDanceImageEditNode,

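Taken together, the renamed pieces above show the full shape of an extension under the new IO alias. A minimal self-contained sketch of the same pattern; the node and extension names here are invented for illustration, and a synchronous execute is assumed to be acceptable for a non-API node:

from typing_extensions import override
from comfy_api.latest import ComfyExtension, IO


class EchoNode(IO.ComfyNode):  # hypothetical node, for illustration only
    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="EchoNode",
            display_name="Echo",
            category="api node/example",
            description="Returns its prompt unchanged.",
            inputs=[IO.String.Input("prompt", multiline=True)],
            outputs=[IO.String.Output()],
        )

    @classmethod
    def execute(cls, prompt: str) -> IO.NodeOutput:
        return IO.NodeOutput(prompt)


class EchoExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [EchoNode]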

@ -1,6 +1,6 @@
from io import BytesIO from io import BytesIO
from typing_extensions import override from typing_extensions import override
from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.latest import ComfyExtension, IO
from PIL import Image from PIL import Image
import numpy as np import numpy as np
import torch import torch
@ -246,76 +246,76 @@ def display_image_urls_on_node(image_urls, node_id):
PromptServer.instance.send_progress_text(urls_text, node_id) PromptServer.instance.send_progress_text(urls_text, node_id)
class IdeogramV1(comfy_io.ComfyNode): class IdeogramV1(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return comfy_io.Schema( return IO.Schema(
node_id="IdeogramV1", node_id="IdeogramV1",
display_name="Ideogram V1", display_name="Ideogram V1",
category="api node/image/Ideogram", category="api node/image/Ideogram",
description="Generates images using the Ideogram V1 model.", description="Generates images using the Ideogram V1 model.",
is_api_node=True, is_api_node=True,
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Prompt for the image generation", tooltip="Prompt for the image generation",
), ),
comfy_io.Boolean.Input( IO.Boolean.Input(
"turbo", "turbo",
default=False, default=False,
tooltip="Whether to use turbo mode (faster generation, potentially lower quality)", tooltip="Whether to use turbo mode (faster generation, potentially lower quality)",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=list(V1_V2_RATIO_MAP.keys()), options=list(V1_V2_RATIO_MAP.keys()),
default="1:1", default="1:1",
tooltip="The aspect ratio for image generation.", tooltip="The aspect ratio for image generation.",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"magic_prompt_option", "magic_prompt_option",
options=["AUTO", "ON", "OFF"], options=["AUTO", "ON", "OFF"],
default="AUTO", default="AUTO",
tooltip="Determine if MagicPrompt should be used in generation", tooltip="Determine if MagicPrompt should be used in generation",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
max=2147483647, max=2147483647,
step=1, step=1,
control_after_generate=True, control_after_generate=True,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
optional=True, optional=True,
), ),
comfy_io.String.Input( IO.String.Input(
"negative_prompt", "negative_prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Description of what to exclude from the image", tooltip="Description of what to exclude from the image",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"num_images", "num_images",
default=1, default=1,
min=1, min=1,
max=8, max=8,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
optional=True, optional=True,
), ),
], ],
outputs=[ outputs=[
comfy_io.Image.Output(), IO.Image.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
) )
@ -372,39 +372,39 @@ class IdeogramV1(comfy_io.ComfyNode):
raise Exception("No image URLs were generated in the response") raise Exception("No image URLs were generated in the response")
display_image_urls_on_node(image_urls, cls.hidden.unique_id) display_image_urls_on_node(image_urls, cls.hidden.unique_id)
return comfy_io.NodeOutput(await download_and_process_images(image_urls)) return IO.NodeOutput(await download_and_process_images(image_urls))
class IdeogramV2(comfy_io.ComfyNode): class IdeogramV2(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return comfy_io.Schema( return IO.Schema(
node_id="IdeogramV2", node_id="IdeogramV2",
display_name="Ideogram V2", display_name="Ideogram V2",
category="api node/image/Ideogram", category="api node/image/Ideogram",
description="Generates images using the Ideogram V2 model.", description="Generates images using the Ideogram V2 model.",
is_api_node=True, is_api_node=True,
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Prompt for the image generation", tooltip="Prompt for the image generation",
), ),
comfy_io.Boolean.Input( IO.Boolean.Input(
"turbo", "turbo",
default=False, default=False,
tooltip="Whether to use turbo mode (faster generation, potentially lower quality)", tooltip="Whether to use turbo mode (faster generation, potentially lower quality)",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=list(V1_V2_RATIO_MAP.keys()), options=list(V1_V2_RATIO_MAP.keys()),
default="1:1", default="1:1",
tooltip="The aspect ratio for image generation. Ignored if resolution is not set to AUTO.", tooltip="The aspect ratio for image generation. Ignored if resolution is not set to AUTO.",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=list(V1_V1_RES_MAP.keys()), options=list(V1_V1_RES_MAP.keys()),
default="Auto", default="Auto",
@ -412,44 +412,44 @@ class IdeogramV2(comfy_io.ComfyNode):
"If not set to AUTO, this overrides the aspect_ratio setting.", "If not set to AUTO, this overrides the aspect_ratio setting.",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"magic_prompt_option", "magic_prompt_option",
options=["AUTO", "ON", "OFF"], options=["AUTO", "ON", "OFF"],
default="AUTO", default="AUTO",
tooltip="Determine if MagicPrompt should be used in generation", tooltip="Determine if MagicPrompt should be used in generation",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
max=2147483647, max=2147483647,
step=1, step=1,
control_after_generate=True, control_after_generate=True,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"style_type", "style_type",
options=["AUTO", "GENERAL", "REALISTIC", "DESIGN", "RENDER_3D", "ANIME"], options=["AUTO", "GENERAL", "REALISTIC", "DESIGN", "RENDER_3D", "ANIME"],
default="NONE", default="NONE",
tooltip="Style type for generation (V2 only)", tooltip="Style type for generation (V2 only)",
optional=True, optional=True,
), ),
comfy_io.String.Input( IO.String.Input(
"negative_prompt", "negative_prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Description of what to exclude from the image", tooltip="Description of what to exclude from the image",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"num_images", "num_images",
default=1, default=1,
min=1, min=1,
max=8, max=8,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
optional=True, optional=True,
), ),
#"color_palette": ( #"color_palette": (
@ -462,12 +462,12 @@ class IdeogramV2(comfy_io.ComfyNode):
#), #),
], ],
outputs=[ outputs=[
comfy_io.Image.Output(), IO.Image.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
) )
@ -541,14 +541,14 @@ class IdeogramV2(comfy_io.ComfyNode):
raise Exception("No image URLs were generated in the response") raise Exception("No image URLs were generated in the response")
display_image_urls_on_node(image_urls, cls.hidden.unique_id) display_image_urls_on_node(image_urls, cls.hidden.unique_id)
return comfy_io.NodeOutput(await download_and_process_images(image_urls)) return IO.NodeOutput(await download_and_process_images(image_urls))
class IdeogramV3(comfy_io.ComfyNode): class IdeogramV3(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return comfy_io.Schema( return IO.Schema(
node_id="IdeogramV3", node_id="IdeogramV3",
display_name="Ideogram V3", display_name="Ideogram V3",
category="api node/image/Ideogram", category="api node/image/Ideogram",
@ -556,30 +556,30 @@ class IdeogramV3(comfy_io.ComfyNode):
"Supports both regular image generation from text prompts and image editing with mask.", "Supports both regular image generation from text prompts and image editing with mask.",
is_api_node=True, is_api_node=True,
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Prompt for the image generation or editing", tooltip="Prompt for the image generation or editing",
), ),
comfy_io.Image.Input( IO.Image.Input(
"image", "image",
tooltip="Optional reference image for image editing.", tooltip="Optional reference image for image editing.",
optional=True, optional=True,
), ),
comfy_io.Mask.Input( IO.Mask.Input(
"mask", "mask",
tooltip="Optional mask for inpainting (white areas will be replaced)", tooltip="Optional mask for inpainting (white areas will be replaced)",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=list(V3_RATIO_MAP.keys()), options=list(V3_RATIO_MAP.keys()),
default="1:1", default="1:1",
tooltip="The aspect ratio for image generation. Ignored if resolution is not set to Auto.", tooltip="The aspect ratio for image generation. Ignored if resolution is not set to Auto.",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=V3_RESOLUTIONS, options=V3_RESOLUTIONS,
default="Auto", default="Auto",
@ -587,57 +587,57 @@ class IdeogramV3(comfy_io.ComfyNode):
"If not set to Auto, this overrides the aspect_ratio setting.", "If not set to Auto, this overrides the aspect_ratio setting.",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"magic_prompt_option", "magic_prompt_option",
options=["AUTO", "ON", "OFF"], options=["AUTO", "ON", "OFF"],
default="AUTO", default="AUTO",
tooltip="Determine if MagicPrompt should be used in generation", tooltip="Determine if MagicPrompt should be used in generation",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
max=2147483647, max=2147483647,
step=1, step=1,
control_after_generate=True, control_after_generate=True,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"num_images", "num_images",
default=1, default=1,
min=1, min=1,
max=8, max=8,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"rendering_speed", "rendering_speed",
options=["DEFAULT", "TURBO", "QUALITY"], options=["DEFAULT", "TURBO", "QUALITY"],
default="DEFAULT", default="DEFAULT",
tooltip="Controls the trade-off between generation speed and quality", tooltip="Controls the trade-off between generation speed and quality",
optional=True, optional=True,
), ),
comfy_io.Image.Input( IO.Image.Input(
"character_image", "character_image",
tooltip="Image to use as character reference.", tooltip="Image to use as character reference.",
optional=True, optional=True,
), ),
comfy_io.Mask.Input( IO.Mask.Input(
"character_mask", "character_mask",
tooltip="Optional mask for character reference image.", tooltip="Optional mask for character reference image.",
optional=True, optional=True,
), ),
], ],
outputs=[ outputs=[
comfy_io.Image.Output(), IO.Image.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
) )
@ -826,12 +826,12 @@ class IdeogramV3(comfy_io.ComfyNode):
raise Exception("No image URLs were generated in the response") raise Exception("No image URLs were generated in the response")
display_image_urls_on_node(image_urls, cls.hidden.unique_id) display_image_urls_on_node(image_urls, cls.hidden.unique_id)
return comfy_io.NodeOutput(await download_and_process_images(image_urls)) return IO.NodeOutput(await download_and_process_images(image_urls))
class IdeogramExtension(ComfyExtension): class IdeogramExtension(ComfyExtension):
@override @override
async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [ return [
IdeogramV1, IdeogramV1,
IdeogramV2, IdeogramV2,

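All three Ideogram nodes end their execute with the same response-handling tail. A hypothetical consolidation of it, built only from the helper names visible above; this refactor is not part of the commit:

async def finish_ideogram_response(image_urls: list[str], node_id: str) -> IO.NodeOutput:
    # Shared tail of IdeogramV1/V2/V3.execute (hypothetical helper)
    if not image_urls:
        raise Exception("No image URLs were generated in the response")
    display_image_urls_on_node(image_urls, node_id)
    return IO.NodeOutput(await download_and_process_images(image_urls))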

@ -76,7 +76,7 @@ from comfy_api_nodes.util.validation_utils import (
from comfy_api.input_impl import VideoFromFile from comfy_api.input_impl import VideoFromFile
from comfy_api.input.basic_types import AudioInput from comfy_api.input.basic_types import AudioInput
from comfy_api.input.video_types import VideoInput from comfy_api.input.video_types import VideoInput
from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.latest import ComfyExtension, IO
KLING_API_VERSION = "v1" KLING_API_VERSION = "v1"
PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video" PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video"
@ -387,7 +387,7 @@ async def execute_text2video(
duration: str, duration: str,
aspect_ratio: str, aspect_ratio: str,
camera_control: Optional[KlingCameraControl] = None, camera_control: Optional[KlingCameraControl] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V)
initial_operation = SynchronousOperation( initial_operation = SynchronousOperation(
endpoint=ApiEndpoint( endpoint=ApiEndpoint(
@ -428,7 +428,7 @@ async def execute_text2video(
validate_video_result_response(final_response) validate_video_result_response(final_response)
video = get_video_from_response(final_response) video = get_video_from_response(final_response)
return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration))
async def execute_image2video( async def execute_image2video(
@ -444,7 +444,7 @@ async def execute_image2video(
duration: str, duration: str,
camera_control: Optional[KlingCameraControl] = None, camera_control: Optional[KlingCameraControl] = None,
end_frame: Optional[torch.Tensor] = None, end_frame: Optional[torch.Tensor] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V) validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V)
validate_input_image(start_frame) validate_input_image(start_frame)
@ -499,7 +499,7 @@ async def execute_image2video(
validate_video_result_response(final_response) validate_video_result_response(final_response)
video = get_video_from_response(final_response) video = get_video_from_response(final_response)
return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration))
async def execute_video_effect( async def execute_video_effect(
@ -576,7 +576,7 @@ async def execute_lipsync(
text: Optional[str] = None, text: Optional[str] = None,
voice_speed: Optional[float] = None, voice_speed: Optional[float] = None,
voice_id: Optional[str] = None, voice_id: Optional[str] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
if text: if text:
validate_string(text, field_name="Text", max_length=MAX_PROMPT_LENGTH_LIP_SYNC) validate_string(text, field_name="Text", max_length=MAX_PROMPT_LENGTH_LIP_SYNC)
validate_video_dimensions(video, 720, 1920) validate_video_dimensions(video, 720, 1920)
@ -634,77 +634,77 @@ async def execute_lipsync(
validate_video_result_response(final_response) validate_video_result_response(final_response)
video = get_video_from_response(final_response) video = get_video_from_response(final_response)
return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration))
class KlingCameraControls(comfy_io.ComfyNode): class KlingCameraControls(IO.ComfyNode):
"""Kling Camera Controls Node""" """Kling Camera Controls Node"""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingCameraControls", node_id="KlingCameraControls",
display_name="Kling Camera Controls", display_name="Kling Camera Controls",
category="api node/video/Kling", category="api node/video/Kling",
description="Allows specifying configuration options for Kling Camera Controls and motion control effects.", description="Allows specifying configuration options for Kling Camera Controls and motion control effects.",
inputs=[ inputs=[
comfy_io.Combo.Input("camera_control_type", options=KlingCameraControlType), IO.Combo.Input("camera_control_type", options=KlingCameraControlType),
comfy_io.Float.Input( IO.Float.Input(
"horizontal_movement", "horizontal_movement",
default=0.0, default=0.0,
min=-10.0, min=-10.0,
max=10.0, max=10.0,
step=0.25, step=0.25,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right", tooltip="Controls camera's movement along horizontal axis (x-axis). Negative indicates left, positive indicates right",
), ),
comfy_io.Float.Input( IO.Float.Input(
"vertical_movement", "vertical_movement",
default=0.0, default=0.0,
min=-10.0, min=-10.0,
max=10.0, max=10.0,
step=0.25, step=0.25,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward.", tooltip="Controls camera's movement along vertical axis (y-axis). Negative indicates downward, positive indicates upward.",
), ),
comfy_io.Float.Input( IO.Float.Input(
"pan", "pan",
default=0.5, default=0.5,
min=-10.0, min=-10.0,
max=10.0, max=10.0,
step=0.25, step=0.25,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.", tooltip="Controls camera's rotation in vertical plane (x-axis). Negative indicates downward rotation, positive indicates upward rotation.",
), ),
comfy_io.Float.Input( IO.Float.Input(
"tilt", "tilt",
default=0.0, default=0.0,
min=-10.0, min=-10.0,
max=10.0, max=10.0,
step=0.25, step=0.25,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.", tooltip="Controls camera's rotation in horizontal plane (y-axis). Negative indicates left rotation, positive indicates right rotation.",
), ),
comfy_io.Float.Input( IO.Float.Input(
"roll", "roll",
default=0.0, default=0.0,
min=-10.0, min=-10.0,
max=10.0, max=10.0,
step=0.25, step=0.25,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.", tooltip="Controls camera's rolling amount (z-axis). Negative indicates counterclockwise, positive indicates clockwise.",
), ),
comfy_io.Float.Input( IO.Float.Input(
"zoom", "zoom",
default=0.0, default=0.0,
min=-10.0, min=-10.0,
max=10.0, max=10.0,
step=0.25, step=0.25,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.", tooltip="Controls change in camera's focal length. Negative indicates narrower field of view, positive indicates wider field of view.",
), ),
], ],
outputs=[comfy_io.Custom("CAMERA_CONTROL").Output(display_name="camera_control")], outputs=[IO.Custom("CAMERA_CONTROL").Output(display_name="camera_control")],
) )
@classmethod @classmethod
@ -740,8 +740,8 @@ class KlingCameraControls(comfy_io.ComfyNode):
tilt: float, tilt: float,
roll: float, roll: float,
zoom: float, zoom: float,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return comfy_io.NodeOutput( return IO.NodeOutput(
KlingCameraControl( KlingCameraControl(
type=KlingCameraControlType(camera_control_type), type=KlingCameraControlType(camera_control_type),
config=KlingCameraConfig( config=KlingCameraConfig(
@ -756,27 +756,27 @@ class KlingCameraControls(comfy_io.ComfyNode):
) )
class KlingTextToVideoNode(comfy_io.ComfyNode): class KlingTextToVideoNode(IO.ComfyNode):
"""Kling Text to Video Node""" """Kling Text to Video Node"""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
modes = list(MODE_TEXT2VIDEO.keys()) modes = list(MODE_TEXT2VIDEO.keys())
return comfy_io.Schema( return IO.Schema(
node_id="KlingTextToVideoNode", node_id="KlingTextToVideoNode",
display_name="Kling Text to Video", display_name="Kling Text to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Text to Video Node", description="Kling Text to Video Node",
inputs=[ inputs=[
comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
comfy_io.Float.Input("cfg_scale", default=1.0, min=0.0, max=1.0), IO.Float.Input("cfg_scale", default=1.0, min=0.0, max=1.0),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=KlingVideoGenAspectRatio, options=KlingVideoGenAspectRatio,
default="16:9", default="16:9",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"mode", "mode",
options=modes, options=modes,
default=modes[4], default=modes[4],
@ -784,14 +784,14 @@ class KlingTextToVideoNode(comfy_io.ComfyNode):
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -804,7 +804,7 @@ class KlingTextToVideoNode(comfy_io.ComfyNode):
cfg_scale: float, cfg_scale: float,
mode: str, mode: str,
aspect_ratio: str, aspect_ratio: str,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
model_mode, duration, model_name = MODE_TEXT2VIDEO[mode] model_mode, duration, model_name = MODE_TEXT2VIDEO[mode]
return await execute_text2video( return await execute_text2video(
auth_kwargs={ auth_kwargs={
@ -822,42 +822,42 @@ class KlingTextToVideoNode(comfy_io.ComfyNode):
) )
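The mode combo above is resolved through MODE_TEXT2VIDEO into the three request fields unpacked in execute. A sketch of the assumed shape of that mapping; the concrete keys and values here are illustrative only:

# Assumed: each combo label maps to the (mode, duration, model_name) triple
# sent to the Kling API. Entries below are examples, not taken from the source.
MODE_TEXT2VIDEO: dict[str, tuple[str, str, str]] = {
    "standard mode / 5s duration / kling-v1": ("std", "5", "kling-v1"),
    "pro mode / 5s duration / kling-v1-5": ("pro", "5", "kling-v1-5"),
}

model_mode, duration, model_name = MODE_TEXT2VIDEO["pro mode / 5s duration / kling-v1-5"]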
class KlingCameraControlT2VNode(comfy_io.ComfyNode): class KlingCameraControlT2VNode(IO.ComfyNode):
""" """
Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera. Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera.
Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02. Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingCameraControlT2VNode", node_id="KlingCameraControlT2VNode",
display_name="Kling Text to Video (Camera Control)", display_name="Kling Text to Video (Camera Control)",
category="api node/video/Kling", category="api node/video/Kling",
description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.", description="Transform text into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original text.",
inputs=[ inputs=[
comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), IO.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=KlingVideoGenAspectRatio, options=KlingVideoGenAspectRatio,
default="16:9", default="16:9",
), ),
comfy_io.Custom("CAMERA_CONTROL").Input( IO.Custom("CAMERA_CONTROL").Input(
"camera_control", "camera_control",
tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.",
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -870,7 +870,7 @@ class KlingCameraControlT2VNode(comfy_io.ComfyNode):
cfg_scale: float, cfg_scale: float,
aspect_ratio: str, aspect_ratio: str,
camera_control: Optional[KlingCameraControl] = None, camera_control: Optional[KlingCameraControl] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return await execute_text2video( return await execute_text2video(
auth_kwargs={ auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -888,43 +888,43 @@ class KlingCameraControlT2VNode(comfy_io.ComfyNode):
) )
class KlingImage2VideoNode(comfy_io.ComfyNode): class KlingImage2VideoNode(IO.ComfyNode):
"""Kling Image to Video Node""" """Kling Image to Video Node"""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingImage2VideoNode", node_id="KlingImage2VideoNode",
display_name="Kling Image to Video", display_name="Kling Image to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Image to Video Node", description="Kling Image to Video Node",
inputs=[ inputs=[
comfy_io.Image.Input("start_frame", tooltip="The reference image used to generate the video."), IO.Image.Input("start_frame", tooltip="The reference image used to generate the video."),
comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
comfy_io.Combo.Input( IO.Combo.Input(
"model_name", "model_name",
options=KlingVideoGenModelName, options=KlingVideoGenModelName,
default="kling-v2-master", default="kling-v2-master",
), ),
comfy_io.Float.Input("cfg_scale", default=0.8, min=0.0, max=1.0), IO.Float.Input("cfg_scale", default=0.8, min=0.0, max=1.0),
comfy_io.Combo.Input("mode", options=KlingVideoGenMode, default=KlingVideoGenMode.std), IO.Combo.Input("mode", options=KlingVideoGenMode, default=KlingVideoGenMode.std),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=KlingVideoGenAspectRatio, options=KlingVideoGenAspectRatio,
default=KlingVideoGenAspectRatio.field_16_9, default=KlingVideoGenAspectRatio.field_16_9,
), ),
comfy_io.Combo.Input("duration", options=KlingVideoGenDuration, default=KlingVideoGenDuration.field_5), IO.Combo.Input("duration", options=KlingVideoGenDuration, default=KlingVideoGenDuration.field_5),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -942,7 +942,7 @@ class KlingImage2VideoNode(comfy_io.ComfyNode):
duration: str, duration: str,
camera_control: Optional[KlingCameraControl] = None, camera_control: Optional[KlingCameraControl] = None,
end_frame: Optional[torch.Tensor] = None, end_frame: Optional[torch.Tensor] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return await execute_image2video( return await execute_image2video(
auth_kwargs={ auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -962,46 +962,46 @@ class KlingImage2VideoNode(comfy_io.ComfyNode):
) )
class KlingCameraControlI2VNode(comfy_io.ComfyNode): class KlingCameraControlI2VNode(IO.ComfyNode):
""" """
Kling Image to Video Camera Control Node. This node is an image to video node, but it supports controlling the camera. Kling Image to Video Camera Control Node. This node is an image to video node, but it supports controlling the camera.
Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02. Duration, mode, and model_name request fields are hard-coded because camera control is only supported in pro mode with the kling-v1-5 model at 5s duration as of 2025-05-02.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingCameraControlI2VNode", node_id="KlingCameraControlI2VNode",
display_name="Kling Image to Video (Camera Control)", display_name="Kling Image to Video (Camera Control)",
category="api node/video/Kling", category="api node/video/Kling",
description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.", description="Transform still images into cinematic videos with professional camera movements that simulate real-world cinematography. Control virtual camera actions including zoom, rotation, pan, tilt, and first-person view, while maintaining focus on your original image.",
inputs=[ inputs=[
comfy_io.Image.Input( IO.Image.Input(
"start_frame", "start_frame",
tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.", tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.",
), ),
comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
comfy_io.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0), IO.Float.Input("cfg_scale", default=0.75, min=0.0, max=1.0),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=KlingVideoGenAspectRatio, options=KlingVideoGenAspectRatio,
default=KlingVideoGenAspectRatio.field_16_9, default=KlingVideoGenAspectRatio.field_16_9,
), ),
comfy_io.Custom("CAMERA_CONTROL").Input( IO.Custom("CAMERA_CONTROL").Input(
"camera_control", "camera_control",
tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.", tooltip="Can be created using the Kling Camera Controls node. Controls the camera movement and motion during the video generation.",
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1015,7 +1015,7 @@ class KlingCameraControlI2VNode(comfy_io.ComfyNode):
cfg_scale: float, cfg_scale: float,
aspect_ratio: str, aspect_ratio: str,
camera_control: KlingCameraControl, camera_control: KlingCameraControl,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return await execute_image2video( return await execute_image2video(
auth_kwargs={ auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -1034,37 +1034,37 @@ class KlingCameraControlI2VNode(comfy_io.ComfyNode):
) )
class KlingStartEndFrameNode(comfy_io.ComfyNode): class KlingStartEndFrameNode(IO.ComfyNode):
""" """
Kling First Last Frame Node. This node allows creation of a video from a first and last frame. It calls the normal image to video endpoint, but only allows the subset of input options that support the `image_tail` request field. Kling First Last Frame Node. This node allows creation of a video from a first and last frame. It calls the normal image to video endpoint, but only allows the subset of input options that support the `image_tail` request field.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
modes = list(MODE_START_END_FRAME.keys()) modes = list(MODE_START_END_FRAME.keys())
return comfy_io.Schema( return IO.Schema(
node_id="KlingStartEndFrameNode", node_id="KlingStartEndFrameNode",
display_name="Kling Start-End Frame to Video", display_name="Kling Start-End Frame to Video",
category="api node/video/Kling", category="api node/video/Kling",
description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.", description="Generate a video sequence that transitions between your provided start and end images. The node creates all frames in between, producing a smooth transformation from the first frame to the last.",
inputs=[ inputs=[
comfy_io.Image.Input( IO.Image.Input(
"start_frame", "start_frame",
tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.", tooltip="Reference Image - URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1. Base64 should not include data:image prefix.",
), ),
comfy_io.Image.Input( IO.Image.Input(
"end_frame", "end_frame",
tooltip="Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. Base64 should not include data:image prefix.", tooltip="Reference Image - End frame control. URL or Base64 encoded string, cannot exceed 10MB, resolution not less than 300*300px. Base64 should not include data:image prefix.",
), ),
comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
comfy_io.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), IO.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=[i.value for i in KlingVideoGenAspectRatio], options=[i.value for i in KlingVideoGenAspectRatio],
default="16:9", default="16:9",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"mode", "mode",
options=modes, options=modes,
default=modes[2], default=modes[2],
@ -1072,14 +1072,14 @@ class KlingStartEndFrameNode(comfy_io.ComfyNode):
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1094,7 +1094,7 @@ class KlingStartEndFrameNode(comfy_io.ComfyNode):
cfg_scale: float, cfg_scale: float,
aspect_ratio: str, aspect_ratio: str,
mode: str, mode: str,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
mode, duration, model_name = MODE_START_END_FRAME[mode] mode, duration, model_name = MODE_START_END_FRAME[mode]
return await execute_image2video( return await execute_image2video(
auth_kwargs={ auth_kwargs={
@ -1114,41 +1114,41 @@ class KlingStartEndFrameNode(comfy_io.ComfyNode):
) )
class KlingVideoExtendNode(comfy_io.ComfyNode): class KlingVideoExtendNode(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingVideoExtendNode", node_id="KlingVideoExtendNode",
display_name="Kling Video Extend", display_name="Kling Video Extend",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.", description="Kling Video Extend Node. Extend videos made by other Kling nodes. The video_id is created by using other Kling Nodes.",
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
tooltip="Positive text prompt for guiding the video extension", tooltip="Positive text prompt for guiding the video extension",
), ),
comfy_io.String.Input( IO.String.Input(
"negative_prompt", "negative_prompt",
multiline=True, multiline=True,
tooltip="Negative text prompt for elements to avoid in the extended video", tooltip="Negative text prompt for elements to avoid in the extended video",
), ),
comfy_io.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0), IO.Float.Input("cfg_scale", default=0.5, min=0.0, max=1.0),
comfy_io.String.Input( IO.String.Input(
"video_id", "video_id",
force_input=True, force_input=True,
tooltip="The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension.", tooltip="The ID of the video to be extended. Supports videos generated by text-to-video, image-to-video, and previous video extension operations. Cannot exceed 3 minutes total duration after extension.",
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1160,7 +1160,7 @@ class KlingVideoExtendNode(comfy_io.ComfyNode):
negative_prompt: str, negative_prompt: str,
cfg_scale: float, cfg_scale: float,
video_id: str, video_id: str,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V) validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_T2V)
auth = { auth = {
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -1201,49 +1201,49 @@ class KlingVideoExtendNode(comfy_io.ComfyNode):
validate_video_result_response(final_response) validate_video_result_response(final_response)
video = get_video_from_response(final_response) video = get_video_from_response(final_response)
return comfy_io.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration)) return IO.NodeOutput(await download_url_to_video_output(str(video.url)), str(video.id), str(video.duration))
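Every Kling polling loop in this file ends with the same three-step tail seen here. A hypothetical helper that captures it, assembled only from the calls visible above; the commit itself does not perform this consolidation:

async def video_result_to_output(final_response) -> IO.NodeOutput:
    # Validate the completed task, pull the video record, download, and wrap it.
    validate_video_result_response(final_response)
    video = get_video_from_response(final_response)
    return IO.NodeOutput(
        await download_url_to_video_output(str(video.url)),
        str(video.id),
        str(video.duration),
    )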
class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode): class KlingDualCharacterVideoEffectNode(IO.ComfyNode):
"""Kling Dual Character Video Effect Node""" """Kling Dual Character Video Effect Node"""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingDualCharacterVideoEffectNode", node_id="KlingDualCharacterVideoEffectNode",
display_name="Kling Dual Character Video Effects", display_name="Kling Dual Character Video Effects",
category="api node/video/Kling", category="api node/video/Kling",
description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.", description="Achieve different special effects when generating a video based on the effect_scene. First image will be positioned on left side, second on right side of the composite.",
inputs=[ inputs=[
comfy_io.Image.Input("image_left", tooltip="Left side image"), IO.Image.Input("image_left", tooltip="Left side image"),
comfy_io.Image.Input("image_right", tooltip="Right side image"), IO.Image.Input("image_right", tooltip="Right side image"),
comfy_io.Combo.Input( IO.Combo.Input(
"effect_scene", "effect_scene",
options=[i.value for i in KlingDualCharacterEffectsScene], options=[i.value for i in KlingDualCharacterEffectsScene],
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model_name", "model_name",
options=[i.value for i in KlingCharacterEffectModelName], options=[i.value for i in KlingCharacterEffectModelName],
default="kling-v1", default="kling-v1",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"mode", "mode",
options=[i.value for i in KlingVideoGenMode], options=[i.value for i in KlingVideoGenMode],
default="std", default="std",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"duration", "duration",
options=[i.value for i in KlingVideoGenDuration], options=[i.value for i in KlingVideoGenDuration],
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1257,7 +1257,7 @@ class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode):
model_name: KlingCharacterEffectModelName, model_name: KlingCharacterEffectModelName,
mode: KlingVideoGenMode, mode: KlingVideoGenMode,
duration: KlingVideoGenDuration, duration: KlingVideoGenDuration,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
video, _, duration = await execute_video_effect( video, _, duration = await execute_video_effect(
auth_kwargs={ auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -1272,43 +1272,43 @@ class KlingDualCharacterVideoEffectNode(comfy_io.ComfyNode):
image_1=image_left, image_1=image_left,
image_2=image_right, image_2=image_right,
) )
return comfy_io.NodeOutput(video, duration) return IO.NodeOutput(video, duration)
class KlingSingleImageVideoEffectNode(comfy_io.ComfyNode): class KlingSingleImageVideoEffectNode(IO.ComfyNode):
"""Kling Single Image Video Effect Node""" """Kling Single Image Video Effect Node"""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingSingleImageVideoEffectNode", node_id="KlingSingleImageVideoEffectNode",
display_name="Kling Video Effects", display_name="Kling Video Effects",
category="api node/video/Kling", category="api node/video/Kling",
description="Achieve different special effects when generating a video based on the effect_scene.", description="Achieve different special effects when generating a video based on the effect_scene.",
inputs=[ inputs=[
comfy_io.Image.Input("image", tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"), IO.Image.Input("image", tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"),
comfy_io.Combo.Input( IO.Combo.Input(
"effect_scene", "effect_scene",
options=[i.value for i in KlingSingleImageEffectsScene], options=[i.value for i in KlingSingleImageEffectsScene],
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model_name", "model_name",
options=[i.value for i in KlingSingleImageEffectModelName], options=[i.value for i in KlingSingleImageEffectModelName],
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"duration", "duration",
options=[i.value for i in KlingVideoGenDuration], options=[i.value for i in KlingVideoGenDuration],
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1320,8 +1320,8 @@ class KlingSingleImageVideoEffectNode(comfy_io.ComfyNode):
effect_scene: KlingSingleImageEffectsScene, effect_scene: KlingSingleImageEffectsScene,
model_name: KlingSingleImageEffectModelName, model_name: KlingSingleImageEffectModelName,
duration: KlingVideoGenDuration, duration: KlingVideoGenDuration,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return comfy_io.NodeOutput( return IO.NodeOutput(
*( *(
await execute_video_effect( await execute_video_effect(
auth_kwargs={ auth_kwargs={
@ -1339,34 +1339,34 @@ class KlingSingleImageVideoEffectNode(comfy_io.ComfyNode):
) )
class KlingLipSyncAudioToVideoNode(comfy_io.ComfyNode): class KlingLipSyncAudioToVideoNode(IO.ComfyNode):
"""Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file.""" """Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file."""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingLipSyncAudioToVideoNode", node_id="KlingLipSyncAudioToVideoNode",
display_name="Kling Lip Sync Video with Audio", display_name="Kling Lip Sync Video with Audio",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", description="Kling Lip Sync Audio to Video Node. Syncs mouth movements in a video file to the audio content of an audio file. When using, ensure that the audio contains clearly distinguishable vocals and that the video contains a distinct face. The audio file should not be larger than 5MB. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
inputs=[ inputs=[
comfy_io.Video.Input("video"), IO.Video.Input("video"),
comfy_io.Audio.Input("audio"), IO.Audio.Input("audio"),
comfy_io.Combo.Input( IO.Combo.Input(
"voice_language", "voice_language",
options=[i.value for i in KlingLipSyncVoiceLanguage], options=[i.value for i in KlingLipSyncVoiceLanguage],
default="en", default="en",
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
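For orientation, the limits quoted in the node description above reduce to a simple pre-flight check. A minimal sketch, assuming hypothetical helper and argument names (this validation is not itself part of the commit):

def check_lipsync_video(size_bytes: int, width: int, height: int, duration_s: float) -> None:
    # Mirrors the documented limits: <=100MB, sides within 720-1920px, 2-10s length.
    if size_bytes > 100 * 1024 * 1024:
        raise ValueError("Video file must not be larger than 100MB.")
    if not (720 <= width <= 1920 and 720 <= height <= 1920):
        raise ValueError("Video height/width must be between 720px and 1920px.")
    if not (2.0 <= duration_s <= 10.0):
        raise ValueError("Video must be between 2s and 10s in length.")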
@ -1377,7 +1377,7 @@ class KlingLipSyncAudioToVideoNode(comfy_io.ComfyNode):
video: VideoInput, video: VideoInput,
audio: AudioInput, audio: AudioInput,
voice_language: str, voice_language: str,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return await execute_lipsync( return await execute_lipsync(
auth_kwargs={ auth_kwargs={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -1391,46 +1391,46 @@ class KlingLipSyncAudioToVideoNode(comfy_io.ComfyNode):
) )
class KlingLipSyncTextToVideoNode(comfy_io.ComfyNode): class KlingLipSyncTextToVideoNode(IO.ComfyNode):
"""Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt.""" """Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt."""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingLipSyncTextToVideoNode", node_id="KlingLipSyncTextToVideoNode",
display_name="Kling Lip Sync Video with Text", display_name="Kling Lip Sync Video with Text",
category="api node/video/Kling", category="api node/video/Kling",
description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.", description="Kling Lip Sync Text to Video Node. Syncs mouth movements in a video file to a text prompt. The video file should not be larger than 100MB, should have height/width between 720px and 1920px, and should be between 2s and 10s in length.",
inputs=[ inputs=[
comfy_io.Video.Input("video"), IO.Video.Input("video"),
comfy_io.String.Input( IO.String.Input(
"text", "text",
multiline=True, multiline=True,
tooltip="Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.", tooltip="Text Content for Lip-Sync Video Generation. Required when mode is text2video. Maximum length is 120 characters.",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"voice", "voice",
options=list(VOICES_CONFIG.keys()), options=list(VOICES_CONFIG.keys()),
default="Melody", default="Melody",
), ),
comfy_io.Float.Input( IO.Float.Input(
"voice_speed", "voice_speed",
default=1, default=1,
min=0.8, min=0.8,
max=2.0, max=2.0,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place.", tooltip="Speech Rate. Valid range: 0.8~2.0, accurate to one decimal place.",
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
comfy_io.String.Output(display_name="video_id"), IO.String.Output(display_name="video_id"),
comfy_io.String.Output(display_name="duration"), IO.String.Output(display_name="duration"),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1442,7 +1442,7 @@ class KlingLipSyncTextToVideoNode(comfy_io.ComfyNode):
text: str, text: str,
voice: str, voice: str,
voice_speed: float, voice_speed: float,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
voice_id, voice_language = VOICES_CONFIG[voice] voice_id, voice_language = VOICES_CONFIG[voice]
return await execute_lipsync( return await execute_lipsync(
auth_kwargs={ auth_kwargs={
@ -1459,32 +1459,32 @@ class KlingLipSyncTextToVideoNode(comfy_io.ComfyNode):
) )
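The unpacking `voice_id, voice_language = VOICES_CONFIG[voice]` in the execute path above implies that VOICES_CONFIG maps a display name to a (voice_id, language) pair. A sketch of that shape with made-up entries (the real table lives in this module and is not shown in the diff):

VOICES_CONFIG_SKETCH: dict[str, tuple[str, str]] = {
    "Melody": ("melody-voice-id", "en"),      # illustrative entry only
    "Narrator": ("narrator-voice-id", "en"),  # illustrative entry only
}

voice_id, voice_language = VOICES_CONFIG_SKETCH["Melody"]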
class KlingVirtualTryOnNode(comfy_io.ComfyNode): class KlingVirtualTryOnNode(IO.ComfyNode):
"""Kling Virtual Try On Node.""" """Kling Virtual Try On Node."""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingVirtualTryOnNode", node_id="KlingVirtualTryOnNode",
display_name="Kling Virtual Try On", display_name="Kling Virtual Try On",
category="api node/image/Kling", category="api node/image/Kling",
description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.", description="Kling Virtual Try On Node. Input a human image and a cloth image to try on the cloth on the human. You can merge multiple clothing item pictures into one image with a white background.",
inputs=[ inputs=[
comfy_io.Image.Input("human_image"), IO.Image.Input("human_image"),
comfy_io.Image.Input("cloth_image"), IO.Image.Input("cloth_image"),
comfy_io.Combo.Input( IO.Combo.Input(
"model_name", "model_name",
options=[i.value for i in KlingVirtualTryOnModelName], options=[i.value for i in KlingVirtualTryOnModelName],
default="kolors-virtual-try-on-v1", default="kolors-virtual-try-on-v1",
), ),
], ],
outputs=[ outputs=[
comfy_io.Image.Output(), IO.Image.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1495,7 +1495,7 @@ class KlingVirtualTryOnNode(comfy_io.ComfyNode):
human_image: torch.Tensor, human_image: torch.Tensor,
cloth_image: torch.Tensor, cloth_image: torch.Tensor,
model_name: KlingVirtualTryOnModelName, model_name: KlingVirtualTryOnModelName,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
auth = { auth = {
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org,
@ -1534,70 +1534,70 @@ class KlingVirtualTryOnNode(comfy_io.ComfyNode):
validate_image_result_response(final_response) validate_image_result_response(final_response)
images = get_images_from_response(final_response) images = get_images_from_response(final_response)
return comfy_io.NodeOutput(await image_result_to_node_output(images)) return IO.NodeOutput(await image_result_to_node_output(images))
class KlingImageGenerationNode(comfy_io.ComfyNode): class KlingImageGenerationNode(IO.ComfyNode):
"""Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.""" """Kling Image Generation Node. Generate an image from a text prompt with an optional reference image."""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="KlingImageGenerationNode", node_id="KlingImageGenerationNode",
display_name="Kling Image Generation", display_name="Kling Image Generation",
category="api node/image/Kling", category="api node/image/Kling",
description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.", description="Kling Image Generation Node. Generate an image from a text prompt with an optional reference image.",
inputs=[ inputs=[
comfy_io.String.Input("prompt", multiline=True, tooltip="Positive text prompt"), IO.String.Input("prompt", multiline=True, tooltip="Positive text prompt"),
comfy_io.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"), IO.String.Input("negative_prompt", multiline=True, tooltip="Negative text prompt"),
comfy_io.Combo.Input( IO.Combo.Input(
"image_type", "image_type",
options=[i.value for i in KlingImageGenImageReferenceType], options=[i.value for i in KlingImageGenImageReferenceType],
), ),
comfy_io.Float.Input( IO.Float.Input(
"image_fidelity", "image_fidelity",
default=0.5, default=0.5,
min=0.0, min=0.0,
max=1.0, max=1.0,
step=0.01, step=0.01,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Reference intensity for user-uploaded images", tooltip="Reference intensity for user-uploaded images",
), ),
comfy_io.Float.Input( IO.Float.Input(
"human_fidelity", "human_fidelity",
default=0.45, default=0.45,
min=0.0, min=0.0,
max=1.0, max=1.0,
step=0.01, step=0.01,
display_mode=comfy_io.NumberDisplay.slider, display_mode=IO.NumberDisplay.slider,
tooltip="Subject reference similarity", tooltip="Subject reference similarity",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model_name", "model_name",
options=[i.value for i in KlingImageGenModelName], options=[i.value for i in KlingImageGenModelName],
default="kling-v1", default="kling-v1",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=[i.value for i in KlingImageGenAspectRatio], options=[i.value for i in KlingImageGenAspectRatio],
default="16:9", default="16:9",
), ),
comfy_io.Int.Input( IO.Int.Input(
"n", "n",
default=1, default=1,
min=1, min=1,
max=9, max=9,
tooltip="Number of generated images", tooltip="Number of generated images",
), ),
comfy_io.Image.Input("image", optional=True), IO.Image.Input("image", optional=True),
], ],
outputs=[ outputs=[
comfy_io.Image.Output(), IO.Image.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -1614,7 +1614,7 @@ class KlingImageGenerationNode(comfy_io.ComfyNode):
n: int, n: int,
aspect_ratio: KlingImageGenAspectRatio, aspect_ratio: KlingImageGenAspectRatio,
image: Optional[torch.Tensor] = None, image: Optional[torch.Tensor] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_string(prompt, field_name="prompt", min_length=1, max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) validate_string(prompt, field_name="prompt", min_length=1, max_length=MAX_PROMPT_LENGTH_IMAGE_GEN)
validate_string(negative_prompt, field_name="negative_prompt", max_length=MAX_PROMPT_LENGTH_IMAGE_GEN) validate_string(negative_prompt, field_name="negative_prompt", max_length=MAX_PROMPT_LENGTH_IMAGE_GEN)
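The two validate_string calls above enforce per-field length bounds before any API request is made. A rough stand-in for what such a helper does, judging from the keyword arguments (the real implementation in the API-nodes utilities may differ):

from typing import Optional

def validate_string_sketch(value: str, field_name: str, min_length: int = 0,
                           max_length: Optional[int] = None) -> None:
    # Fail early with a field-specific message instead of a server-side error.
    if len(value) < min_length:
        raise ValueError(f"{field_name} must be at least {min_length} characters.")
    if max_length is not None and len(value) > max_length:
        raise ValueError(f"{field_name} must be at most {max_length} characters.")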
@ -1669,12 +1669,12 @@ class KlingImageGenerationNode(comfy_io.ComfyNode):
validate_image_result_response(final_response) validate_image_result_response(final_response)
images = get_images_from_response(final_response) images = get_images_from_response(final_response)
return comfy_io.NodeOutput(await image_result_to_node_output(images)) return IO.NodeOutput(await image_result_to_node_output(images))
class KlingExtension(ComfyExtension): class KlingExtension(ComfyExtension):
@override @override
async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [ return [
KlingCameraControls, KlingCameraControls,
KlingTextToVideoNode, KlingTextToVideoNode,
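Taken together, the Kling classes above all follow one shape under the new IO alias: a define_schema classmethod describing inputs and outputs, an execute classmethod returning IO.NodeOutput, and a ComfyExtension listing the nodes. A distilled, self-contained sketch (the node id, names, and category are hypothetical, and optional schema fields such as description and hidden are omitted):

from typing_extensions import override
from comfy_api.latest import ComfyExtension, IO

class EchoNode(IO.ComfyNode):
    """Hypothetical example node: returns its input string unchanged."""

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="EchoNode",
            display_name="Echo",
            category="examples",
            inputs=[IO.String.Input("text", multiline=True)],
            outputs=[IO.String.Output()],
        )

    @classmethod
    def execute(cls, text: str) -> IO.NodeOutput:
        return IO.NodeOutput(text)

class EchoExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [EchoNode]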
View File
@ -2,7 +2,7 @@ from __future__ import annotations
from inspect import cleandoc from inspect import cleandoc
from typing import Optional from typing import Optional
from typing_extensions import override from typing_extensions import override
from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.latest import ComfyExtension, IO
from comfy_api.input_impl.video_types import VideoFromFile from comfy_api.input_impl.video_types import VideoFromFile
from comfy_api_nodes.apis.luma_api import ( from comfy_api_nodes.apis.luma_api import (
LumaImageModel, LumaImageModel,
@ -52,24 +52,24 @@ def image_result_url_extractor(response: LumaGeneration):
def video_result_url_extractor(response: LumaGeneration): def video_result_url_extractor(response: LumaGeneration):
return response.assets.video if hasattr(response, "assets") and hasattr(response.assets, "video") else None return response.assets.video if hasattr(response, "assets") and hasattr(response.assets, "video") else None
class LumaReferenceNode(comfy_io.ComfyNode): class LumaReferenceNode(IO.ComfyNode):
""" """
Holds an image and weight for use with Luma Generate Image node. Holds an image and weight for use with Luma Generate Image node.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="LumaReferenceNode", node_id="LumaReferenceNode",
display_name="Luma Reference", display_name="Luma Reference",
category="api node/image/Luma", category="api node/image/Luma",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.Image.Input( IO.Image.Input(
"image", "image",
tooltip="Image to use as reference.", tooltip="Image to use as reference.",
), ),
comfy_io.Float.Input( IO.Float.Input(
"weight", "weight",
default=1.0, default=1.0,
min=0.0, min=0.0,
@ -77,71 +77,71 @@ class LumaReferenceNode(comfy_io.ComfyNode):
step=0.01, step=0.01,
tooltip="Weight of image reference.", tooltip="Weight of image reference.",
), ),
comfy_io.Custom(LumaIO.LUMA_REF).Input( IO.Custom(LumaIO.LUMA_REF).Input(
"luma_ref", "luma_ref",
optional=True, optional=True,
), ),
], ],
outputs=[comfy_io.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")], outputs=[IO.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
) )
@classmethod @classmethod
def execute( def execute(
cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
if luma_ref is not None: if luma_ref is not None:
luma_ref = luma_ref.clone() luma_ref = luma_ref.clone()
else: else:
luma_ref = LumaReferenceChain() luma_ref = LumaReferenceChain()
luma_ref.add(LumaReference(image=image, weight=round(weight, 2))) luma_ref.add(LumaReference(image=image, weight=round(weight, 2)))
return comfy_io.NodeOutput(luma_ref) return IO.NodeOutput(luma_ref)
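The clone-before-add step above is what lets several Luma Reference nodes be daisy-chained without mutating an upstream node's output. Conceptually (a sketch of the behaviour relied on here, not the real LumaReferenceChain):

class ReferenceChainSketch:
    def __init__(self):
        self.refs: list = []

    def clone(self) -> "ReferenceChainSketch":
        copy = ReferenceChainSketch()
        copy.refs = list(self.refs)  # shallow copy: the upstream chain stays intact
        return copy

    def add(self, ref) -> None:
        self.refs.append(ref)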
class LumaConceptsNode(comfy_io.ComfyNode): class LumaConceptsNode(IO.ComfyNode):
""" """
Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes. Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="LumaConceptsNode", node_id="LumaConceptsNode",
display_name="Luma Concepts", display_name="Luma Concepts",
category="api node/video/Luma", category="api node/video/Luma",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.Combo.Input( IO.Combo.Input(
"concept1", "concept1",
options=get_luma_concepts(include_none=True), options=get_luma_concepts(include_none=True),
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"concept2", "concept2",
options=get_luma_concepts(include_none=True), options=get_luma_concepts(include_none=True),
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"concept3", "concept3",
options=get_luma_concepts(include_none=True), options=get_luma_concepts(include_none=True),
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"concept4", "concept4",
options=get_luma_concepts(include_none=True), options=get_luma_concepts(include_none=True),
), ),
comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( IO.Custom(LumaIO.LUMA_CONCEPTS).Input(
"luma_concepts", "luma_concepts",
tooltip="Optional Camera Concepts to add to the ones chosen here.", tooltip="Optional Camera Concepts to add to the ones chosen here.",
optional=True, optional=True,
), ),
], ],
outputs=[comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")], outputs=[IO.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
) )
@ -153,42 +153,42 @@ class LumaConceptsNode(comfy_io.ComfyNode):
concept3: str, concept3: str,
concept4: str, concept4: str,
luma_concepts: LumaConceptChain = None, luma_concepts: LumaConceptChain = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4]) chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4])
if luma_concepts is not None: if luma_concepts is not None:
chain = luma_concepts.clone_and_merge(chain) chain = luma_concepts.clone_and_merge(chain)
return comfy_io.NodeOutput(chain) return IO.NodeOutput(chain)
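clone_and_merge appears to follow the same non-mutating discipline: copy the incoming chain, then append this node's four concept picks. As a list-level sketch (the merge order is an assumption):

def clone_and_merge_sketch(upstream: list[str], local: list[str]) -> list[str]:
    # Equivalent in spirit to LumaConceptChain.clone_and_merge: the upstream
    # list is never modified; callers receive a fresh combined chain.
    return list(upstream) + list(local)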
class LumaImageGenerationNode(comfy_io.ComfyNode): class LumaImageGenerationNode(IO.ComfyNode):
""" """
Generates images synchronously based on prompt and aspect ratio. Generates images synchronously based on prompt and aspect ratio.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="LumaImageNode", node_id="LumaImageNode",
display_name="Luma Text to Image", display_name="Luma Text to Image",
category="api node/image/Luma", category="api node/image/Luma",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Prompt for the image generation", tooltip="Prompt for the image generation",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=LumaImageModel, options=LumaImageModel,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=LumaAspectRatio, options=LumaAspectRatio,
default=LumaAspectRatio.ratio_16_9, default=LumaAspectRatio.ratio_16_9,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -196,7 +196,7 @@ class LumaImageGenerationNode(comfy_io.ComfyNode):
control_after_generate=True, control_after_generate=True,
tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
), ),
comfy_io.Float.Input( IO.Float.Input(
"style_image_weight", "style_image_weight",
default=1.0, default=1.0,
min=0.0, min=0.0,
@ -204,27 +204,27 @@ class LumaImageGenerationNode(comfy_io.ComfyNode):
step=0.01, step=0.01,
tooltip="Weight of style image. Ignored if no style_image provided.", tooltip="Weight of style image. Ignored if no style_image provided.",
), ),
comfy_io.Custom(LumaIO.LUMA_REF).Input( IO.Custom(LumaIO.LUMA_REF).Input(
"image_luma_ref", "image_luma_ref",
tooltip="Luma Reference node connection to influence generation with input images; up to 4 images can be considered.", tooltip="Luma Reference node connection to influence generation with input images; up to 4 images can be considered.",
optional=True, optional=True,
), ),
comfy_io.Image.Input( IO.Image.Input(
"style_image", "style_image",
tooltip="Style reference image; only 1 image will be used.", tooltip="Style reference image; only 1 image will be used.",
optional=True, optional=True,
), ),
comfy_io.Image.Input( IO.Image.Input(
"character_image", "character_image",
tooltip="Character reference images; can be a batch of multiple, up to 4 images can be considered.", tooltip="Character reference images; can be a batch of multiple, up to 4 images can be considered.",
optional=True, optional=True,
), ),
], ],
outputs=[comfy_io.Image.Output()], outputs=[IO.Image.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -240,7 +240,7 @@ class LumaImageGenerationNode(comfy_io.ComfyNode):
image_luma_ref: LumaReferenceChain = None, image_luma_ref: LumaReferenceChain = None,
style_image: torch.Tensor = None, style_image: torch.Tensor = None,
character_image: torch.Tensor = None, character_image: torch.Tensor = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=True, min_length=3) validate_string(prompt, strip_whitespace=True, min_length=3)
auth_kwargs = { auth_kwargs = {
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -306,7 +306,7 @@ class LumaImageGenerationNode(comfy_io.ComfyNode):
async with aiohttp.ClientSession() as session: async with aiohttp.ClientSession() as session:
async with session.get(response_poll.assets.image) as img_response: async with session.get(response_poll.assets.image) as img_response:
img = process_image_response(await img_response.content.read()) img = process_image_response(await img_response.content.read())
return comfy_io.NodeOutput(img) return IO.NodeOutput(img)
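The session/get/read shape above is the download idiom used throughout these API nodes. A self-contained version for reference (the URL is a placeholder):

import asyncio
import aiohttp

async def fetch_bytes(url: str) -> bytes:
    # Same aiohttp pattern as the node code above: one session, one GET,
    # read the full body into memory.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return await resp.content.read()

# Example: asyncio.run(fetch_bytes("https://example.com/result.png"))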
@classmethod @classmethod
async def _convert_luma_refs( async def _convert_luma_refs(
@ -334,29 +334,29 @@ class LumaImageGenerationNode(comfy_io.ComfyNode):
return await cls._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs) return await cls._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs)
class LumaImageModifyNode(comfy_io.ComfyNode): class LumaImageModifyNode(IO.ComfyNode):
""" """
Modifies images synchronously based on prompt and aspect ratio. Modifies images synchronously based on prompt and aspect ratio.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="LumaImageModifyNode", node_id="LumaImageModifyNode",
display_name="Luma Image to Image", display_name="Luma Image to Image",
category="api node/image/Luma", category="api node/image/Luma",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.Image.Input( IO.Image.Input(
"image", "image",
), ),
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Prompt for the image generation", tooltip="Prompt for the image generation",
), ),
comfy_io.Float.Input( IO.Float.Input(
"image_weight", "image_weight",
default=0.1, default=0.1,
min=0.0, min=0.0,
@ -364,11 +364,11 @@ class LumaImageModifyNode(comfy_io.ComfyNode):
step=0.01, step=0.01,
tooltip="Weight of the image; the closer to 1.0, the less the image will be modified.", tooltip="Weight of the image; the closer to 1.0, the less the image will be modified.",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=LumaImageModel, options=LumaImageModel,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -377,11 +377,11 @@ class LumaImageModifyNode(comfy_io.ComfyNode):
tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
), ),
], ],
outputs=[comfy_io.Image.Output()], outputs=[IO.Image.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -394,7 +394,7 @@ class LumaImageModifyNode(comfy_io.ComfyNode):
image: torch.Tensor, image: torch.Tensor,
image_weight: float, image_weight: float,
seed, seed,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
auth_kwargs = { auth_kwargs = {
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org,
@ -442,51 +442,51 @@ class LumaImageModifyNode(comfy_io.ComfyNode):
async with aiohttp.ClientSession() as session: async with aiohttp.ClientSession() as session:
async with session.get(response_poll.assets.image) as img_response: async with session.get(response_poll.assets.image) as img_response:
img = process_image_response(await img_response.content.read()) img = process_image_response(await img_response.content.read())
return comfy_io.NodeOutput(img) return IO.NodeOutput(img)
class LumaTextToVideoGenerationNode(comfy_io.ComfyNode): class LumaTextToVideoGenerationNode(IO.ComfyNode):
""" """
Generates videos synchronously based on prompt and output_size. Generates videos synchronously based on prompt and output_size.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="LumaVideoNode", node_id="LumaVideoNode",
display_name="Luma Text to Video", display_name="Luma Text to Video",
category="api node/video/Luma", category="api node/video/Luma",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Prompt for the video generation", tooltip="Prompt for the video generation",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=LumaVideoModel, options=LumaVideoModel,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=LumaAspectRatio, options=LumaAspectRatio,
default=LumaAspectRatio.ratio_16_9, default=LumaAspectRatio.ratio_16_9,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=LumaVideoOutputResolution, options=LumaVideoOutputResolution,
default=LumaVideoOutputResolution.res_540p, default=LumaVideoOutputResolution.res_540p,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"duration", "duration",
options=LumaVideoModelOutputDuration, options=LumaVideoModelOutputDuration,
), ),
comfy_io.Boolean.Input( IO.Boolean.Input(
"loop", "loop",
default=False, default=False,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -494,17 +494,17 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode):
control_after_generate=True, control_after_generate=True,
tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
), ),
comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( IO.Custom(LumaIO.LUMA_CONCEPTS).Input(
"luma_concepts", "luma_concepts",
tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.",
optional=True, optional=True,
) )
], ],
outputs=[comfy_io.Video.Output()], outputs=[IO.Video.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -520,7 +520,7 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode):
loop: bool, loop: bool,
seed, seed,
luma_concepts: LumaConceptChain = None, luma_concepts: LumaConceptChain = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=False, min_length=3) validate_string(prompt, strip_whitespace=False, min_length=3)
duration = duration if model != LumaVideoModel.ray_1_6 else None duration = duration if model != LumaVideoModel.ray_1_6 else None
resolution = resolution if model != LumaVideoModel.ray_1_6 else None resolution = resolution if model != LumaVideoModel.ray_1_6 else None
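The two lines above encode a model quirk: Ray 1.6 requests must not carry duration or resolution, so both are nulled before the payload is built. The same guard, isolated (the enum value is shown as a plain string purely for illustration):

def normalize_ray_params(model: str, duration, resolution):
    # Ray 1.6 ignores these fields; passing None omits them from the request.
    if model == "ray-1-6":
        return None, None
    return duration, resolution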
@ -571,51 +571,51 @@ class LumaTextToVideoGenerationNode(comfy_io.ComfyNode):
async with aiohttp.ClientSession() as session: async with aiohttp.ClientSession() as session:
async with session.get(response_poll.assets.video) as vid_response: async with session.get(response_poll.assets.video) as vid_response:
return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
class LumaImageToVideoGenerationNode(comfy_io.ComfyNode): class LumaImageToVideoGenerationNode(IO.ComfyNode):
""" """
Generates videos synchronously based on prompt, input images, and output_size. Generates videos synchronously based on prompt, input images, and output_size.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="LumaImageToVideoNode", node_id="LumaImageToVideoNode",
display_name="Luma Image to Video", display_name="Luma Image to Video",
category="api node/video/Luma", category="api node/video/Luma",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="Prompt for the video generation", tooltip="Prompt for the video generation",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=LumaVideoModel, options=LumaVideoModel,
), ),
# comfy_io.Combo.Input( # IO.Combo.Input(
# "aspect_ratio", # "aspect_ratio",
# options=[ratio.value for ratio in LumaAspectRatio], # options=[ratio.value for ratio in LumaAspectRatio],
# default=LumaAspectRatio.ratio_16_9, # default=LumaAspectRatio.ratio_16_9,
# ), # ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=LumaVideoOutputResolution, options=LumaVideoOutputResolution,
default=LumaVideoOutputResolution.res_540p, default=LumaVideoOutputResolution.res_540p,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"duration", "duration",
options=[dur.value for dur in LumaVideoModelOutputDuration], options=[dur.value for dur in LumaVideoModelOutputDuration],
), ),
comfy_io.Boolean.Input( IO.Boolean.Input(
"loop", "loop",
default=False, default=False,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -623,27 +623,27 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
control_after_generate=True, control_after_generate=True,
tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.", tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
), ),
comfy_io.Image.Input( IO.Image.Input(
"first_image", "first_image",
tooltip="First frame of generated video.", tooltip="First frame of generated video.",
optional=True, optional=True,
), ),
comfy_io.Image.Input( IO.Image.Input(
"last_image", "last_image",
tooltip="Last frame of generated video.", tooltip="Last frame of generated video.",
optional=True, optional=True,
), ),
comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input( IO.Custom(LumaIO.LUMA_CONCEPTS).Input(
"luma_concepts", "luma_concepts",
tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.", tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.",
optional=True, optional=True,
) )
], ],
outputs=[comfy_io.Video.Output()], outputs=[IO.Video.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -660,7 +660,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
first_image: torch.Tensor = None, first_image: torch.Tensor = None,
last_image: torch.Tensor = None, last_image: torch.Tensor = None,
luma_concepts: LumaConceptChain = None, luma_concepts: LumaConceptChain = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
if first_image is None and last_image is None: if first_image is None and last_image is None:
raise Exception( raise Exception(
"At least one of first_image and last_image requires an input." "At least one of first_image and last_image requires an input."
@ -716,7 +716,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
async with aiohttp.ClientSession() as session: async with aiohttp.ClientSession() as session:
async with session.get(response_poll.assets.video) as vid_response: async with session.get(response_poll.assets.video) as vid_response:
return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read()))) return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
@classmethod @classmethod
async def _convert_to_keyframes( async def _convert_to_keyframes(
@ -744,7 +744,7 @@ class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
class LumaExtension(ComfyExtension): class LumaExtension(ComfyExtension):
@override @override
async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [ return [
LumaImageGenerationNode, LumaImageGenerationNode,
LumaImageModifyNode, LumaImageModifyNode,
View File
@ -4,7 +4,7 @@ import logging
import torch import torch
from typing_extensions import override from typing_extensions import override
from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.latest import ComfyExtension, IO
from comfy_api.input_impl.video_types import VideoFromFile from comfy_api.input_impl.video_types import VideoFromFile
from comfy_api_nodes.apis import ( from comfy_api_nodes.apis import (
MinimaxVideoGenerationRequest, MinimaxVideoGenerationRequest,
@ -43,7 +43,7 @@ async def _generate_mm_video(
image: Optional[torch.Tensor] = None, # used for ImageToVideo image: Optional[torch.Tensor] = None, # used for ImageToVideo
subject: Optional[torch.Tensor] = None, # used for SubjectToVideo subject: Optional[torch.Tensor] = None, # used for SubjectToVideo
average_duration: Optional[int] = None, average_duration: Optional[int] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
if image is None: if image is None:
validate_string(prompt_text, field_name="prompt_text") validate_string(prompt_text, field_name="prompt_text")
# upload image, if passed in # upload image, if passed in
@ -133,35 +133,35 @@ async def _generate_mm_video(
error_msg = f"Failed to download video from {file_url}" error_msg = f"Failed to download video from {file_url}"
logging.error(error_msg) logging.error(error_msg)
raise Exception(error_msg) raise Exception(error_msg)
return comfy_io.NodeOutput(VideoFromFile(video_io)) return IO.NodeOutput(VideoFromFile(video_io))
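Every MiniMax node below funnels into this one helper and differs only in which optional tensor (image, subject) it forwards. The fan-in shape, sketched; only `auth` is a confirmed keyword name in this diff, and the remaining keyword names are assumptions for illustration:

async def minimax_execute_sketch(cls, prompt_text: str, model: str, seed: int):
    # Build auth from the hidden fields, then delegate to the shared helper.
    auth = {
        "auth_token": cls.hidden.auth_token_comfy_org,
        "comfy_api_key": cls.hidden.api_key_comfy_org,
    }
    return await _generate_mm_video(  # helper defined above in this module
        auth=auth, prompt_text=prompt_text, model=model, seed=seed,
    )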
class MinimaxTextToVideoNode(comfy_io.ComfyNode): class MinimaxTextToVideoNode(IO.ComfyNode):
""" """
Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API. Generates videos synchronously based on a prompt, and optional parameters using MiniMax's API.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="MinimaxTextToVideoNode", node_id="MinimaxTextToVideoNode",
display_name="MiniMax Text to Video", display_name="MiniMax Text to Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt_text", "prompt_text",
multiline=True, multiline=True,
default="", default="",
tooltip="Text prompt to guide the video generation", tooltip="Text prompt to guide the video generation",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=["T2V-01", "T2V-01-Director"], options=["T2V-01", "T2V-01-Director"],
default="T2V-01", default="T2V-01",
tooltip="Model to use for video generation", tooltip="Model to use for video generation",
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -172,11 +172,11 @@ class MinimaxTextToVideoNode(comfy_io.ComfyNode):
optional=True, optional=True,
), ),
], ],
outputs=[comfy_io.Video.Output()], outputs=[IO.Video.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -187,7 +187,7 @@ class MinimaxTextToVideoNode(comfy_io.ComfyNode):
prompt_text: str, prompt_text: str,
model: str = "T2V-01", model: str = "T2V-01",
seed: int = 0, seed: int = 0,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return await _generate_mm_video( return await _generate_mm_video(
auth={ auth={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -203,36 +203,36 @@ class MinimaxTextToVideoNode(comfy_io.ComfyNode):
) )
class MinimaxImageToVideoNode(comfy_io.ComfyNode): class MinimaxImageToVideoNode(IO.ComfyNode):
""" """
Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="MinimaxImageToVideoNode", node_id="MinimaxImageToVideoNode",
display_name="MiniMax Image to Video", display_name="MiniMax Image to Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.Image.Input( IO.Image.Input(
"image", "image",
tooltip="Image to use as first frame of video generation", tooltip="Image to use as first frame of video generation",
), ),
comfy_io.String.Input( IO.String.Input(
"prompt_text", "prompt_text",
multiline=True, multiline=True,
default="", default="",
tooltip="Text prompt to guide the video generation", tooltip="Text prompt to guide the video generation",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=["I2V-01-Director", "I2V-01", "I2V-01-live"], options=["I2V-01-Director", "I2V-01", "I2V-01-live"],
default="I2V-01", default="I2V-01",
tooltip="Model to use for video generation", tooltip="Model to use for video generation",
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -243,11 +243,11 @@ class MinimaxImageToVideoNode(comfy_io.ComfyNode):
optional=True, optional=True,
), ),
], ],
outputs=[comfy_io.Video.Output()], outputs=[IO.Video.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -259,7 +259,7 @@ class MinimaxImageToVideoNode(comfy_io.ComfyNode):
prompt_text: str, prompt_text: str,
model: str = "I2V-01", model: str = "I2V-01",
seed: int = 0, seed: int = 0,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return await _generate_mm_video( return await _generate_mm_video(
auth={ auth={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -275,36 +275,36 @@ class MinimaxImageToVideoNode(comfy_io.ComfyNode):
) )
class MinimaxSubjectToVideoNode(comfy_io.ComfyNode): class MinimaxSubjectToVideoNode(IO.ComfyNode):
""" """
Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API. Generates videos synchronously based on an image and prompt, and optional parameters using MiniMax's API.
""" """
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="MinimaxSubjectToVideoNode", node_id="MinimaxSubjectToVideoNode",
display_name="MiniMax Subject to Video", display_name="MiniMax Subject to Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.Image.Input( IO.Image.Input(
"subject", "subject",
tooltip="Image of subject to reference for video generation", tooltip="Image of subject to reference for video generation",
), ),
comfy_io.String.Input( IO.String.Input(
"prompt_text", "prompt_text",
multiline=True, multiline=True,
default="", default="",
tooltip="Text prompt to guide the video generation", tooltip="Text prompt to guide the video generation",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=["S2V-01"], options=["S2V-01"],
default="S2V-01", default="S2V-01",
tooltip="Model to use for video generation", tooltip="Model to use for video generation",
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -315,11 +315,11 @@ class MinimaxSubjectToVideoNode(comfy_io.ComfyNode):
optional=True, optional=True,
), ),
], ],
outputs=[comfy_io.Video.Output()], outputs=[IO.Video.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -331,7 +331,7 @@ class MinimaxSubjectToVideoNode(comfy_io.ComfyNode):
prompt_text: str, prompt_text: str,
model: str = "S2V-01", model: str = "S2V-01",
seed: int = 0, seed: int = 0,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
return await _generate_mm_video( return await _generate_mm_video(
auth={ auth={
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
@ -347,24 +347,24 @@ class MinimaxSubjectToVideoNode(comfy_io.ComfyNode):
) )
class MinimaxHailuoVideoNode(comfy_io.ComfyNode): class MinimaxHailuoVideoNode(IO.ComfyNode):
"""Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model.""" """Generates videos from prompt, with optional start frame using the new MiniMax Hailuo-02 model."""
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="MinimaxHailuoVideoNode", node_id="MinimaxHailuoVideoNode",
display_name="MiniMax Hailuo Video", display_name="MiniMax Hailuo Video",
category="api node/video/MiniMax", category="api node/video/MiniMax",
description=cleandoc(cls.__doc__ or ""), description=cleandoc(cls.__doc__ or ""),
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt_text", "prompt_text",
multiline=True, multiline=True,
default="", default="",
tooltip="Text prompt to guide the video generation.", tooltip="Text prompt to guide the video generation.",
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
@ -374,25 +374,25 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode):
tooltip="The random seed used for creating the noise.", tooltip="The random seed used for creating the noise.",
optional=True, optional=True,
), ),
comfy_io.Image.Input( IO.Image.Input(
"first_frame_image", "first_frame_image",
tooltip="Optional image to use as the first frame to generate a video.", tooltip="Optional image to use as the first frame to generate a video.",
optional=True, optional=True,
), ),
comfy_io.Boolean.Input( IO.Boolean.Input(
"prompt_optimizer", "prompt_optimizer",
default=True, default=True,
tooltip="Optimize prompt to improve generation quality when needed.", tooltip="Optimize prompt to improve generation quality when needed.",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"duration", "duration",
options=[6, 10], options=[6, 10],
default=6, default=6,
tooltip="The length of the output video in seconds.", tooltip="The length of the output video in seconds.",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=["768P", "1080P"], options=["768P", "1080P"],
default="768P", default="768P",
@ -400,11 +400,11 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode):
optional=True, optional=True,
), ),
], ],
outputs=[comfy_io.Video.Output()], outputs=[IO.Video.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -419,7 +419,7 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode):
duration: int = 6, duration: int = 6,
resolution: str = "768P", resolution: str = "768P",
model: str = "MiniMax-Hailuo-02", model: str = "MiniMax-Hailuo-02",
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
auth = { auth = {
"auth_token": cls.hidden.auth_token_comfy_org, "auth_token": cls.hidden.auth_token_comfy_org,
"comfy_api_key": cls.hidden.api_key_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org,
@ -513,12 +513,12 @@ class MinimaxHailuoVideoNode(comfy_io.ComfyNode):
error_msg = f"Failed to download video from {file_url}" error_msg = f"Failed to download video from {file_url}"
logging.error(error_msg) logging.error(error_msg)
raise Exception(error_msg) raise Exception(error_msg)
return comfy_io.NodeOutput(VideoFromFile(video_io)) return IO.NodeOutput(VideoFromFile(video_io))
class MinimaxExtension(ComfyExtension): class MinimaxExtension(ComfyExtension):
@override @override
async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [ return [
MinimaxTextToVideoNode, MinimaxTextToVideoNode,
MinimaxImageToVideoNode, MinimaxImageToVideoNode,
View File
@ -22,10 +22,11 @@ from comfy_api_nodes.apinode_utils import (
download_url_to_video_output, download_url_to_video_output,
upload_images_to_comfyapi, upload_images_to_comfyapi,
upload_video_to_comfyapi, upload_video_to_comfyapi,
validate_container_format_is_mp4,
) )
from comfy_api.input import VideoInput from comfy_api.input import VideoInput
from comfy_api.latest import ComfyExtension, InputImpl, io as comfy_io from comfy_api.latest import ComfyExtension, InputImpl, IO
import av import av
import io import io
@ -144,7 +145,7 @@ def validate_video_to_video_input(video: VideoInput) -> VideoInput:
""" """
width, height = _get_video_dimensions(video) width, height = _get_video_dimensions(video)
_validate_video_dimensions(width, height) _validate_video_dimensions(width, height)
_validate_container_format(video) validate_container_format_is_mp4(video)
return _validate_and_trim_duration(video) return _validate_and_trim_duration(video)
@ -177,15 +178,6 @@ def _validate_video_dimensions(width: int, height: int) -> None:
) )
def _validate_container_format(video: VideoInput) -> None:
"""Validates video container format is MP4."""
container_format = video.get_container_format()
if container_format not in ["mp4", "mov,mp4,m4a,3gp,3g2,mj2"]:
raise ValueError(
f"Only MP4 container format supported. Got: {container_format}"
)
def _validate_and_trim_duration(video: VideoInput) -> VideoInput: def _validate_and_trim_duration(video: VideoInput) -> VideoInput:
"""Validates video duration and trims to 5 seconds if needed.""" """Validates video duration and trims to 5 seconds if needed."""
duration = video.get_duration() duration = video.get_duration()
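The container check removed above (and re-homed as the shared validate_container_format_is_mp4) accepts two spellings because PyAV reports the MP4 family of containers under FFmpeg's compound demuxer name. A standalone sketch of the same test; note the node code operates on VideoInput objects rather than file paths:

import av  # PyAV, already imported by this module

def container_is_mp4(path: str) -> bool:
    with av.open(path) as container:
        # FFmpeg names the MP4-family demuxer "mov,mp4,m4a,3gp,3g2,mj2".
        return container.format.name in ("mp4", "mov,mp4,m4a,3gp,3g2,mj2")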
@ -362,25 +354,25 @@ async def get_response(
) )
class MoonvalleyImg2VideoNode(comfy_io.ComfyNode): class MoonvalleyImg2VideoNode(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="MoonvalleyImg2VideoNode", node_id="MoonvalleyImg2VideoNode",
display_name="Moonvalley Marey Image to Video", display_name="Moonvalley Marey Image to Video",
category="api node/video/Moonvalley Marey", category="api node/video/Moonvalley Marey",
description="Moonvalley Marey Image to Video Node", description="Moonvalley Marey Image to Video Node",
inputs=[ inputs=[
comfy_io.Image.Input( IO.Image.Input(
"image", "image",
tooltip="The reference image used to generate the video", tooltip="The reference image used to generate the video",
), ),
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
), ),
comfy_io.String.Input( IO.String.Input(
"negative_prompt", "negative_prompt",
multiline=True, multiline=True,
default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, " default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, "
@ -391,7 +383,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
"wobbly, weird, low quality, plastic, stock footage, video camera, boring", "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
tooltip="Negative prompt text", tooltip="Negative prompt text",
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=[ options=[
"16:9 (1920 x 1080)", "16:9 (1920 x 1080)",
@ -404,7 +396,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
default="16:9 (1920 x 1080)", default="16:9 (1920 x 1080)",
tooltip="Resolution of the output video", tooltip="Resolution of the output video",
), ),
comfy_io.Float.Input( IO.Float.Input(
"prompt_adherence", "prompt_adherence",
default=4.5, default=4.5,
min=1.0, min=1.0,
@ -412,17 +404,17 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
step=1.0, step=1.0,
tooltip="Guidance scale for generation control", tooltip="Guidance scale for generation control",
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=9, default=9,
min=0, min=0,
max=4294967295, max=4294967295,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
tooltip="Random seed value", tooltip="Random seed value",
control_after_generate=True, control_after_generate=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"steps", "steps",
default=33, default=33,
min=1, min=1,
@ -431,11 +423,11 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
tooltip="Number of denoising steps", tooltip="Number of denoising steps",
), ),
], ],
outputs=[comfy_io.Video.Output()], outputs=[IO.Video.Output()],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -450,7 +442,7 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
prompt_adherence: float, prompt_adherence: float,
seed: int, seed: int,
steps: int, steps: int,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_image_dimensions(image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH) validate_image_dimensions(image, min_width=300, min_height=300, max_height=MAX_HEIGHT, max_width=MAX_WIDTH)
validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH) validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
width_height = parse_width_height_from_res(resolution) width_height = parse_width_height_from_res(resolution)
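parse_width_height_from_res turns a combo label like "16:9 (1920 x 1080)" into numeric dimensions. A plausible stand-in, since the helper's body is not shown in this diff (the real implementation and return shape may differ):

import re

def parse_width_height_sketch(res_option: str) -> tuple[int, int]:
    # Pull the two integers out of the parenthesized "(W x H)" suffix.
    m = re.search(r"\((\d+)\s*x\s*(\d+)\)", res_option)
    if m is None:
        raise ValueError(f"Unrecognized resolution option: {res_option}")
    return int(m.group(1)), int(m.group(2))

# parse_width_height_sketch("16:9 (1920 x 1080)") -> (1920, 1080)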
@ -500,25 +492,25 @@ class MoonvalleyImg2VideoNode(comfy_io.ComfyNode):
task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id task_id, auth_kwargs=auth, node_id=cls.hidden.unique_id
) )
video = await download_url_to_video_output(final_response.output_url) video = await download_url_to_video_output(final_response.output_url)
return comfy_io.NodeOutput(video) return IO.NodeOutput(video)
class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode): class MoonvalleyVideo2VideoNode(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls) -> comfy_io.Schema: def define_schema(cls) -> IO.Schema:
return comfy_io.Schema( return IO.Schema(
node_id="MoonvalleyVideo2VideoNode", node_id="MoonvalleyVideo2VideoNode",
display_name="Moonvalley Marey Video to Video", display_name="Moonvalley Marey Video to Video",
category="api node/video/Moonvalley Marey", category="api node/video/Moonvalley Marey",
description="", description="",
inputs=[ inputs=[
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
tooltip="Describes the video to generate", tooltip="Describes the video to generate",
), ),
comfy_io.String.Input( IO.String.Input(
"negative_prompt", "negative_prompt",
multiline=True, multiline=True,
default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, " default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, "
@ -529,28 +521,28 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
"wobbly, weird, low quality, plastic, stock footage, video camera, boring", "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
tooltip="Negative prompt text", tooltip="Negative prompt text",
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=9, default=9,
min=0, min=0,
max=4294967295, max=4294967295,
step=1, step=1,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 tooltip="Random seed value",
 control_after_generate=False,
 ),
-comfy_io.Video.Input(
+IO.Video.Input(
 "video",
 tooltip="The reference video used to generate the output video. Must be at least 5 seconds long. "
 "Videos longer than 5s will be automatically trimmed. Only MP4 format supported.",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "control_type",
 options=["Motion Transfer", "Pose Transfer"],
 default="Motion Transfer",
 optional=True,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "motion_intensity",
 default=100,
 min=0,
@@ -559,21 +551,21 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
 tooltip="Only used if control_type is 'Motion Transfer'",
 optional=True,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "steps",
 default=33,
 min=1,
 max=100,
 step=1,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 tooltip="Number of inference steps",
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -589,7 +581,7 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
 motion_intensity: Optional[int] = 100,
 steps=33,
 prompt_adherence=4.5,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 auth = {
 "auth_token": cls.hidden.auth_token_comfy_org,
 "comfy_api_key": cls.hidden.api_key_comfy_org,
@@ -641,24 +633,24 @@ class MoonvalleyVideo2VideoNode(comfy_io.ComfyNode):
 )
 video = await download_url_to_video_output(final_response.output_url)
-return comfy_io.NodeOutput(video)
+return IO.NodeOutput(video)
-class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
+class MoonvalleyTxt2VideoNode(IO.ComfyNode):
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="MoonvalleyTxt2VideoNode",
 display_name="Moonvalley Marey Text to Video",
 category="api node/video/Moonvalley Marey",
 description="",
 inputs=[
-comfy_io.String.Input(
+IO.String.Input(
 "prompt",
 multiline=True,
 ),
-comfy_io.String.Input(
+IO.String.Input(
 "negative_prompt",
 multiline=True,
 default="<synthetic> <scene cut> gopro, bright, contrast, static, overexposed, vignette, "
@@ -669,7 +661,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
 "wobbly, weird, low quality, plastic, stock footage, video camera, boring",
 tooltip="Negative prompt text",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "resolution",
 options=[
 "16:9 (1920 x 1080)",
@@ -682,7 +674,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
 default="16:9 (1920 x 1080)",
 tooltip="Resolution of the output video",
 ),
-comfy_io.Float.Input(
+IO.Float.Input(
 "prompt_adherence",
 default=4.0,
 min=1.0,
@@ -690,17 +682,17 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
 step=1.0,
 tooltip="Guidance scale for generation control",
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "seed",
 default=9,
 min=0,
 max=4294967295,
 step=1,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 control_after_generate=True,
 tooltip="Random seed value",
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "steps",
 default=33,
 min=1,
@@ -709,11 +701,11 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
 tooltip="Inference steps",
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -727,7 +719,7 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
 prompt_adherence: float,
 seed: int,
 steps: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_prompts(prompt, negative_prompt, MOONVALLEY_MAREY_MAX_PROMPT_LENGTH)
 width_height = parse_width_height_from_res(resolution)
@@ -768,12 +760,12 @@ class MoonvalleyTxt2VideoNode(comfy_io.ComfyNode):
 )
 video = await download_url_to_video_output(final_response.output_url)
-return comfy_io.NodeOutput(video)
+return IO.NodeOutput(video)
 class MoonvalleyExtension(ComfyExtension):
 @override
-async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+async def get_node_list(self) -> list[type[IO.ComfyNode]]:
 return [
 MoonvalleyImg2VideoNode,
 MoonvalleyTxt2VideoNode,
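
The Moonvalley hunks above are representative of the whole commit: every node module now goes through the IO alias instead of the old comfy_io name, while schemas, inputs, and outputs keep their exact shapes. For orientation, here is a minimal sketch of a node written against the renamed alias, using only API surface that appears in this diff (IO.ComfyNode, IO.Schema, IO.Int.Input, IO.NodeOutput, ComfyExtension.get_node_list) plus IO.Int.Output, which is assumed to exist by analogy with the IO.String.Output seen later in this commit; the node id, category, and passthrough behavior are illustrative, not part of the commit:

from typing_extensions import override

from comfy_api.latest import ComfyExtension, IO


class SeedPassthrough(IO.ComfyNode):
    """Illustrative node: echoes its seed input back out."""

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="SeedPassthrough",  # hypothetical id, not in this commit
            display_name="Seed Passthrough",
            category="api node/example",
            inputs=[
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967295,
                    display_mode=IO.NumberDisplay.number,
                ),
            ],
            outputs=[IO.Int.Output()],
        )

    @classmethod
    def execute(cls, seed: int) -> IO.NodeOutput:
        return IO.NodeOutput(seed)


class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [SeedPassthrough]

Everything else in these files, request construction, polling, and download, is untouched by the rename.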

@@ -12,7 +12,7 @@ from typing import Optional, TypeVar
 import torch
 from typing_extensions import override
-from comfy_api.latest import ComfyExtension, comfy_io
+from comfy_api.latest import ComfyExtension, IO
 from comfy_api.input_impl.video_types import VideoCodec, VideoContainer, VideoInput
 from comfy_api_nodes.apinode_utils import (
 download_url_to_video_output,
@@ -47,7 +47,7 @@ async def execute_task(
 initial_operation: SynchronousOperation[R, pika_defs.PikaGenerateResponse],
 auth_kwargs: Optional[dict[str, str]] = None,
 node_id: Optional[str] = None,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 task_id = (await initial_operation.execute()).video_id
 final_response: pika_defs.PikaVideoResponse = await PollingOperation(
 poll_endpoint=ApiEndpoint(
@@ -72,39 +72,39 @@ async def execute_task(
 raise Exception(error_msg)
 video_url = final_response.url
 logging.info("Pika task %s succeeded. Video URL: %s", task_id, video_url)
-return comfy_io.NodeOutput(await download_url_to_video_output(video_url))
+return IO.NodeOutput(await download_url_to_video_output(video_url))
-def get_base_inputs_types() -> list[comfy_io.Input]:
+def get_base_inputs_types() -> list[IO.Input]:
 """Get the base required inputs types common to all Pika nodes."""
 return [
-comfy_io.String.Input("prompt_text", multiline=True),
-comfy_io.String.Input("negative_prompt", multiline=True),
-comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True),
-comfy_io.Combo.Input("resolution", options=["1080p", "720p"], default="1080p"),
-comfy_io.Combo.Input("duration", options=[5, 10], default=5),
+IO.String.Input("prompt_text", multiline=True),
+IO.String.Input("negative_prompt", multiline=True),
+IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True),
+IO.Combo.Input("resolution", options=["1080p", "720p"], default="1080p"),
+IO.Combo.Input("duration", options=[5, 10], default=5),
 ]
-class PikaImageToVideo(comfy_io.ComfyNode):
+class PikaImageToVideo(IO.ComfyNode):
 """Pika 2.2 Image to Video Node."""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PikaImageToVideoNode2_2",
 display_name="Pika Image to Video",
 description="Sends an image and prompt to the Pika API v2.2 to generate a video.",
 category="api node/video/Pika",
 inputs=[
-comfy_io.Image.Input("image", tooltip="The image to convert to video"),
+IO.Image.Input("image", tooltip="The image to convert to video"),
 *get_base_inputs_types(),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -118,7 +118,7 @@ class PikaImageToVideo(comfy_io.ComfyNode):
 seed: int,
 resolution: str,
 duration: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 image_bytes_io = tensor_to_bytesio(image)
 pika_files = {"image": ("image.png", image_bytes_io, "image/png")}
 pika_request_data = pika_defs.PikaBodyGenerate22I2vGenerate22I2vPost(
@@ -147,19 +147,19 @@ class PikaImageToVideo(comfy_io.ComfyNode):
 return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)
-class PikaTextToVideoNode(comfy_io.ComfyNode):
+class PikaTextToVideoNode(IO.ComfyNode):
 """Pika Text2Video v2.2 Node."""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PikaTextToVideoNode2_2",
 display_name="Pika Text to Video",
 description="Sends a text prompt to the Pika API v2.2 to generate a video.",
 category="api node/video/Pika",
 inputs=[
 *get_base_inputs_types(),
-comfy_io.Float.Input(
+IO.Float.Input(
 "aspect_ratio",
 step=0.001,
 min=0.4,
@@ -168,11 +168,11 @@ class PikaTextToVideoNode(comfy_io.ComfyNode):
 tooltip="Aspect ratio (width / height)",
 )
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -186,7 +186,7 @@ class PikaTextToVideoNode(comfy_io.ComfyNode):
 resolution: str,
 duration: int,
 aspect_ratio: float,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 auth = {
 "auth_token": cls.hidden.auth_token_comfy_org,
 "comfy_api_key": cls.hidden.api_key_comfy_org,
@@ -212,24 +212,24 @@ class PikaTextToVideoNode(comfy_io.ComfyNode):
 return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)
-class PikaScenes(comfy_io.ComfyNode):
+class PikaScenes(IO.ComfyNode):
 """PikaScenes v2.2 Node."""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PikaScenesV2_2",
 display_name="Pika Scenes (Video Image Composition)",
 description="Combine your images to create a video with the objects in them. Upload multiple images as ingredients and generate a high-quality video that incorporates all of them.",
 category="api node/video/Pika",
 inputs=[
 *get_base_inputs_types(),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "ingredients_mode",
 options=["creative", "precise"],
 default="creative",
 ),
-comfy_io.Float.Input(
+IO.Float.Input(
 "aspect_ratio",
 step=0.001,
 min=0.4,
@@ -237,37 +237,37 @@ class PikaScenes(comfy_io.ComfyNode):
 default=1.7777777777777777,
 tooltip="Aspect ratio (width / height)",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "image_ingredient_1",
 optional=True,
 tooltip="Image that will be used as ingredient to create a video.",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "image_ingredient_2",
 optional=True,
 tooltip="Image that will be used as ingredient to create a video.",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "image_ingredient_3",
 optional=True,
 tooltip="Image that will be used as ingredient to create a video.",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "image_ingredient_4",
 optional=True,
 tooltip="Image that will be used as ingredient to create a video.",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "image_ingredient_5",
 optional=True,
 tooltip="Image that will be used as ingredient to create a video.",
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -287,7 +287,7 @@ class PikaScenes(comfy_io.ComfyNode):
 image_ingredient_3: Optional[torch.Tensor] = None,
 image_ingredient_4: Optional[torch.Tensor] = None,
 image_ingredient_5: Optional[torch.Tensor] = None,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 all_image_bytes_io = []
 for image in [
 image_ingredient_1,
@@ -333,33 +333,33 @@ class PikaScenes(comfy_io.ComfyNode):
 return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)
-class PikAdditionsNode(comfy_io.ComfyNode):
+class PikAdditionsNode(IO.ComfyNode):
 """Pika Pikadditions Node. Add an image into a video."""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Pikadditions",
 display_name="Pikadditions (Video Object Insertion)",
 description="Add any object or image into your video. Upload a video and specify what you'd like to add to create a seamlessly integrated result.",
 category="api node/video/Pika",
 inputs=[
-comfy_io.Video.Input("video", tooltip="The video to add an image to."),
-comfy_io.Image.Input("image", tooltip="The image to add to the video."),
-comfy_io.String.Input("prompt_text", multiline=True),
-comfy_io.String.Input("negative_prompt", multiline=True),
-comfy_io.Int.Input(
+IO.Video.Input("video", tooltip="The video to add an image to."),
+IO.Image.Input("image", tooltip="The image to add to the video."),
+IO.String.Input("prompt_text", multiline=True),
+IO.String.Input("negative_prompt", multiline=True),
+IO.Int.Input(
 "seed",
 min=0,
 max=0xFFFFFFFF,
 control_after_generate=True,
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -372,7 +372,7 @@ class PikAdditionsNode(comfy_io.ComfyNode):
 prompt_text: str,
 negative_prompt: str,
 seed: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 video_bytes_io = BytesIO()
 video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264)
 video_bytes_io.seek(0)
@@ -407,43 +407,43 @@ class PikAdditionsNode(comfy_io.ComfyNode):
 return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)
-class PikaSwapsNode(comfy_io.ComfyNode):
+class PikaSwapsNode(IO.ComfyNode):
 """Pika Pikaswaps Node."""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Pikaswaps",
 display_name="Pika Swaps (Video Object Replacement)",
 description="Swap out any object or region of your video with a new image or object. Define areas to replace either with a mask or coordinates.",
 category="api node/video/Pika",
 inputs=[
-comfy_io.Video.Input("video", tooltip="The video to swap an object in."),
-comfy_io.Image.Input(
+IO.Video.Input("video", tooltip="The video to swap an object in."),
+IO.Image.Input(
 "image",
 tooltip="The image used to replace the masked object in the video.",
 optional=True,
 ),
-comfy_io.Mask.Input(
+IO.Mask.Input(
 "mask",
 tooltip="Use the mask to define areas in the video to replace.",
 optional=True,
 ),
-comfy_io.String.Input("prompt_text", multiline=True, optional=True),
-comfy_io.String.Input("negative_prompt", multiline=True, optional=True),
-comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True, optional=True),
-comfy_io.String.Input(
+IO.String.Input("prompt_text", multiline=True, optional=True),
+IO.String.Input("negative_prompt", multiline=True, optional=True),
+IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True, optional=True),
+IO.String.Input(
 "region_to_modify",
 multiline=True,
 optional=True,
 tooltip="Plaintext description of the object / region to modify.",
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -458,7 +458,7 @@ class PikaSwapsNode(comfy_io.ComfyNode):
 negative_prompt: str = "",
 seed: int = 0,
 region_to_modify: str = "",
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 video_bytes_io = BytesIO()
 video.save_to(video_bytes_io, format=VideoContainer.MP4, codec=VideoCodec.H264)
 video_bytes_io.seek(0)
@@ -495,30 +495,30 @@ class PikaSwapsNode(comfy_io.ComfyNode):
 return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)
-class PikaffectsNode(comfy_io.ComfyNode):
+class PikaffectsNode(IO.ComfyNode):
 """Pika Pikaffects Node."""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Pikaffects",
 display_name="Pikaffects (Video Effects)",
 description="Generate a video with a specific Pikaffect. Supported Pikaffects: Cake-ify, Crumble, Crush, Decapitate, Deflate, Dissolve, Explode, Eye-pop, Inflate, Levitate, Melt, Peel, Poke, Squish, Ta-da, Tear",
 category="api node/video/Pika",
 inputs=[
-comfy_io.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."),
-comfy_io.Combo.Input(
+IO.Image.Input("image", tooltip="The reference image to apply the Pikaffect to."),
+IO.Combo.Input(
 "pikaffect", options=pika_defs.Pikaffect, default="Cake-ify"
 ),
-comfy_io.String.Input("prompt_text", multiline=True),
-comfy_io.String.Input("negative_prompt", multiline=True),
-comfy_io.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True),
+IO.String.Input("prompt_text", multiline=True),
+IO.String.Input("negative_prompt", multiline=True),
+IO.Int.Input("seed", min=0, max=0xFFFFFFFF, control_after_generate=True),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -531,7 +531,7 @@ class PikaffectsNode(comfy_io.ComfyNode):
 prompt_text: str,
 negative_prompt: str,
 seed: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 auth = {
 "auth_token": cls.hidden.auth_token_comfy_org,
 "comfy_api_key": cls.hidden.api_key_comfy_org,
@@ -556,26 +556,26 @@ class PikaffectsNode(comfy_io.ComfyNode):
 return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)
-class PikaStartEndFrameNode(comfy_io.ComfyNode):
+class PikaStartEndFrameNode(IO.ComfyNode):
 """PikaFrames v2.2 Node."""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PikaStartEndFrameNode2_2",
 display_name="Pika Start and End Frame to Video",
 description="Generate a video by combining your first and last frame. Upload two images to define the start and end points, and let the AI create a smooth transition between them.",
 category="api node/video/Pika",
 inputs=[
-comfy_io.Image.Input("image_start", tooltip="The first image to combine."),
-comfy_io.Image.Input("image_end", tooltip="The last image to combine."),
+IO.Image.Input("image_start", tooltip="The first image to combine."),
+IO.Image.Input("image_end", tooltip="The last image to combine."),
 *get_base_inputs_types(),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -590,7 +590,7 @@ class PikaStartEndFrameNode(comfy_io.ComfyNode):
 seed: int,
 resolution: str,
 duration: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_string(prompt_text, field_name="prompt_text", min_length=1)
 pika_files = [
 ("keyFrames", ("image_start.png", tensor_to_bytesio(image_start), "image/png")),
@@ -623,7 +623,7 @@ class PikaStartEndFrameNode(comfy_io.ComfyNode):
 class PikaApiNodesExtension(ComfyExtension):
 @override
-async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+async def get_node_list(self) -> list[type[IO.ComfyNode]]:
 return [
 PikaImageToVideo,
 PikaTextToVideoNode,
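
Two Pika-specific patterns recur above: the shared user-facing inputs come from get_base_inputs_types() and are spliced into each schema with *, and credentials never appear as visible inputs; they are declared in hidden and read back from cls.hidden inside execute. A condensed sketch of that shape, assuming it lives in the same module as the get_base_inputs_types() helper and execute_task() coroutine shown above (the node id is hypothetical):

class PikaExampleNode(IO.ComfyNode):
    """Illustrative only: shows the shared-inputs and hidden-auth pattern."""

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="PikaExampleNode",  # hypothetical id
            display_name="Pika Example",
            category="api node/video/Pika",
            inputs=[
                IO.Image.Input("image"),
                # prompt_text, negative_prompt, seed, resolution, duration
                *get_base_inputs_types(),
            ],
            outputs=[IO.Video.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
    async def execute(cls, image, prompt_text, negative_prompt, seed, resolution, duration) -> IO.NodeOutput:
        # Hidden values are not part of the signature; they are read off cls.hidden.
        auth = {
            "auth_token": cls.hidden.auth_token_comfy_org,
            "comfy_api_key": cls.hidden.api_key_comfy_org,
        }
        # A real node would build a SynchronousOperation here and delegate to:
        #   return await execute_task(initial_operation, auth_kwargs=auth, node_id=cls.hidden.unique_id)
        raise NotImplementedError

Keeping the common inputs in one helper means a change such as a new resolution option lands in every Pika node at once, which is presumably why the file is structured this way.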

@@ -29,7 +29,7 @@ from comfy_api_nodes.apinode_utils import (
 validate_string,
 )
 from comfy_api.input_impl import VideoFromFile
-from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api.latest import ComfyExtension, IO
 import torch
 import aiohttp
@@ -73,69 +73,69 @@ async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None):
 return response_upload.Resp.img_id
-class PixverseTemplateNode(comfy_io.ComfyNode):
+class PixverseTemplateNode(IO.ComfyNode):
 """
 Select template for PixVerse Video generation.
 """
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PixverseTemplateNode",
 display_name="PixVerse Template",
 category="api node/video/PixVerse",
 inputs=[
-comfy_io.Combo.Input("template", options=list(pixverse_templates.keys())),
+IO.Combo.Input("template", options=list(pixverse_templates.keys())),
 ],
-outputs=[comfy_io.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")],
+outputs=[IO.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")],
 )
 @classmethod
-def execute(cls, template: str) -> comfy_io.NodeOutput:
+def execute(cls, template: str) -> IO.NodeOutput:
 template_id = pixverse_templates.get(template, None)
 if template_id is None:
 raise Exception(f"Template '{template}' is not recognized.")
 # just return the integer
-return comfy_io.NodeOutput(template_id)
+return IO.NodeOutput(template_id)
-class PixverseTextToVideoNode(comfy_io.ComfyNode):
+class PixverseTextToVideoNode(IO.ComfyNode):
 """
 Generates videos based on prompt and output_size.
 """
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PixverseTextToVideoNode",
 display_name="PixVerse Text to Video",
 category="api node/video/PixVerse",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.String.Input(
+IO.String.Input(
 "prompt",
 multiline=True,
 default="",
 tooltip="Prompt for the video generation",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "aspect_ratio",
 options=PixverseAspectRatio,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "quality",
 options=PixverseQuality,
 default=PixverseQuality.res_540p,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "duration_seconds",
 options=PixverseDuration,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "motion_mode",
 options=PixverseMotionMode,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "seed",
 default=0,
 min=0,
@@ -143,24 +143,24 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode):
 control_after_generate=True,
 tooltip="Seed for video generation.",
 ),
-comfy_io.String.Input(
+IO.String.Input(
 "negative_prompt",
 default="",
 multiline=True,
 tooltip="An optional text description of undesired elements on an image.",
 optional=True,
 ),
-comfy_io.Custom(PixverseIO.TEMPLATE).Input(
+IO.Custom(PixverseIO.TEMPLATE).Input(
 "pixverse_template",
 tooltip="An optional template to influence style of generation, created by the PixVerse Template node.",
 optional=True,
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -176,7 +176,7 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode):
 seed,
 negative_prompt: str = None,
 pixverse_template: int = None,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_string(prompt, strip_whitespace=False)
 # 1080p is limited to 5 seconds duration
 # only normal motion_mode supported for 1080p or for non-5 second duration
@@ -237,43 +237,43 @@ class PixverseTextToVideoNode(comfy_io.ComfyNode):
 async with aiohttp.ClientSession() as session:
 async with session.get(response_poll.Resp.url) as vid_response:
-return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
+return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
-class PixverseImageToVideoNode(comfy_io.ComfyNode):
+class PixverseImageToVideoNode(IO.ComfyNode):
 """
 Generates videos based on prompt and output_size.
 """
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PixverseImageToVideoNode",
 display_name="PixVerse Image to Video",
 category="api node/video/PixVerse",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.Image.Input("image"),
-comfy_io.String.Input(
+IO.Image.Input("image"),
+IO.String.Input(
 "prompt",
 multiline=True,
 default="",
 tooltip="Prompt for the video generation",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "quality",
 options=PixverseQuality,
 default=PixverseQuality.res_540p,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "duration_seconds",
 options=PixverseDuration,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "motion_mode",
 options=PixverseMotionMode,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "seed",
 default=0,
 min=0,
@@ -281,24 +281,24 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode):
 control_after_generate=True,
 tooltip="Seed for video generation.",
 ),
-comfy_io.String.Input(
+IO.String.Input(
 "negative_prompt",
 default="",
 multiline=True,
 tooltip="An optional text description of undesired elements on an image.",
 optional=True,
 ),
-comfy_io.Custom(PixverseIO.TEMPLATE).Input(
+IO.Custom(PixverseIO.TEMPLATE).Input(
 "pixverse_template",
 tooltip="An optional template to influence style of generation, created by the PixVerse Template node.",
 optional=True,
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -314,7 +314,7 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode):
 seed,
 negative_prompt: str = None,
 pixverse_template: int = None,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_string(prompt, strip_whitespace=False)
 auth = {
 "auth_token": cls.hidden.auth_token_comfy_org,
@@ -377,44 +377,44 @@ class PixverseImageToVideoNode(comfy_io.ComfyNode):
 async with aiohttp.ClientSession() as session:
 async with session.get(response_poll.Resp.url) as vid_response:
-return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
+return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
-class PixverseTransitionVideoNode(comfy_io.ComfyNode):
+class PixverseTransitionVideoNode(IO.ComfyNode):
 """
 Generates videos based on prompt and output_size.
 """
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="PixverseTransitionVideoNode",
 display_name="PixVerse Transition Video",
 category="api node/video/PixVerse",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.Image.Input("first_frame"),
-comfy_io.Image.Input("last_frame"),
-comfy_io.String.Input(
+IO.Image.Input("first_frame"),
+IO.Image.Input("last_frame"),
+IO.String.Input(
 "prompt",
 multiline=True,
 default="",
 tooltip="Prompt for the video generation",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "quality",
 options=PixverseQuality,
 default=PixverseQuality.res_540p,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "duration_seconds",
 options=PixverseDuration,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "motion_mode",
 options=PixverseMotionMode,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "seed",
 default=0,
 min=0,
@@ -422,7 +422,7 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode):
 control_after_generate=True,
 tooltip="Seed for video generation.",
 ),
-comfy_io.String.Input(
+IO.String.Input(
 "negative_prompt",
 default="",
 multiline=True,
@@ -430,11 +430,11 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode):
 optional=True,
 ),
 ],
-outputs=[comfy_io.Video.Output()],
+outputs=[IO.Video.Output()],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -450,7 +450,7 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode):
 motion_mode: str,
 seed,
 negative_prompt: str = None,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_string(prompt, strip_whitespace=False)
 auth = {
 "auth_token": cls.hidden.auth_token_comfy_org,
@@ -514,12 +514,12 @@ class PixverseTransitionVideoNode(comfy_io.ComfyNode):
 async with aiohttp.ClientSession() as session:
 async with session.get(response_poll.Resp.url) as vid_response:
-return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
+return IO.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))
 class PixVerseExtension(ComfyExtension):
 @override
-async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+async def get_node_list(self) -> list[type[IO.ComfyNode]]:
 return [
 PixverseTextToVideoNode,
 PixverseImageToVideoNode,
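
PixverseTemplateNode above shows the one less common construct in these files: IO.Custom(tag) manufactures a matching Input/Output pair for an application-defined datatype, so a plain integer template id travels along a dedicated wire (tagged PixverseIO.TEMPLATE) rather than a generic int socket. A small sketch of the producer and consumer halves of that pattern, assuming the same IO import as the files above; the tag class, node ids, and payloads are hypothetical:

class ExampleIO:
    TOKEN = "EXAMPLE_TOKEN"  # hypothetical custom-type tag


class TokenProducer(IO.ComfyNode):
    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="TokenProducer",  # hypothetical id
            category="api node/example",
            inputs=[IO.Combo.Input("choice", options=["fast", "quality"])],
            # The output is typed by the tag, so only matching inputs accept it.
            outputs=[IO.Custom(ExampleIO.TOKEN).Output(display_name="token")],
        )

    @classmethod
    def execute(cls, choice: str) -> IO.NodeOutput:
        return IO.NodeOutput({"fast": 1, "quality": 2}[choice])


class TokenConsumer(IO.ComfyNode):
    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="TokenConsumer",  # hypothetical id
            category="api node/example",
            inputs=[IO.Custom(ExampleIO.TOKEN).Input("token", optional=True)],
            outputs=[IO.String.Output()],
        )

    @classmethod
    def execute(cls, token: int = None) -> IO.NodeOutput:
        # Optional custom inputs arrive as None when left unconnected,
        # mirroring how pixverse_template is handled above.
        return IO.NodeOutput(f"token={token}")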

@@ -32,20 +32,20 @@ from comfy_api_nodes.apis.client import (
 SynchronousOperation,
 PollingOperation,
 )
-from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api.latest import ComfyExtension, IO
 COMMON_PARAMETERS = [
-comfy_io.Int.Input(
+IO.Int.Input(
 "Seed",
 default=0,
 min=0,
 max=65535,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 optional=True,
 ),
-comfy_io.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True),
-comfy_io.Combo.Input(
+IO.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True),
+IO.Combo.Input(
 "Polygon_count",
 options=["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "200K-Triangle"],
 default="18K-Quad",
@@ -259,24 +259,24 @@ async def download_files(url_list, task_uuid):
 return model_file_path
-class Rodin3D_Regular(comfy_io.ComfyNode):
+class Rodin3D_Regular(IO.ComfyNode):
 """Generate 3D Assets using Rodin API"""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Rodin3D_Regular",
 display_name="Rodin 3D Generate - Regular Generate",
 category="api node/3d/Rodin",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.Image.Input("Images"),
+IO.Image.Input("Images"),
 *COMMON_PARAMETERS,
 ],
-outputs=[comfy_io.String.Output(display_name="3D Model Path")],
+outputs=[IO.String.Output(display_name="3D Model Path")],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
 ],
 is_api_node=True,
 )
@@ -288,7 +288,7 @@ class Rodin3D_Regular(comfy_io.ComfyNode):
 Seed,
 Material_Type,
 Polygon_count,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 tier = "Regular"
 num_images = Images.shape[0]
 m_images = []
@@ -312,27 +312,27 @@ class Rodin3D_Regular(comfy_io.ComfyNode):
 download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth)
 model = await download_files(download_list, task_uuid)
-return comfy_io.NodeOutput(model)
+return IO.NodeOutput(model)
-class Rodin3D_Detail(comfy_io.ComfyNode):
+class Rodin3D_Detail(IO.ComfyNode):
 """Generate 3D Assets using Rodin API"""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Rodin3D_Detail",
 display_name="Rodin 3D Generate - Detail Generate",
 category="api node/3d/Rodin",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.Image.Input("Images"),
+IO.Image.Input("Images"),
 *COMMON_PARAMETERS,
 ],
-outputs=[comfy_io.String.Output(display_name="3D Model Path")],
+outputs=[IO.String.Output(display_name="3D Model Path")],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
 ],
 is_api_node=True,
 )
@@ -344,7 +344,7 @@ class Rodin3D_Detail(comfy_io.ComfyNode):
 Seed,
 Material_Type,
 Polygon_count,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 tier = "Detail"
 num_images = Images.shape[0]
 m_images = []
@@ -368,27 +368,27 @@ class Rodin3D_Detail(comfy_io.ComfyNode):
 download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth)
 model = await download_files(download_list, task_uuid)
-return comfy_io.NodeOutput(model)
+return IO.NodeOutput(model)
-class Rodin3D_Smooth(comfy_io.ComfyNode):
+class Rodin3D_Smooth(IO.ComfyNode):
 """Generate 3D Assets using Rodin API"""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Rodin3D_Smooth",
 display_name="Rodin 3D Generate - Smooth Generate",
 category="api node/3d/Rodin",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.Image.Input("Images"),
+IO.Image.Input("Images"),
 *COMMON_PARAMETERS,
 ],
-outputs=[comfy_io.String.Output(display_name="3D Model Path")],
+outputs=[IO.String.Output(display_name="3D Model Path")],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
 ],
 is_api_node=True,
 )
@@ -400,7 +400,7 @@ class Rodin3D_Smooth(comfy_io.ComfyNode):
 Seed,
 Material_Type,
 Polygon_count,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 tier = "Smooth"
 num_images = Images.shape[0]
 m_images = []
@@ -424,34 +424,34 @@ class Rodin3D_Smooth(comfy_io.ComfyNode):
 download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth)
 model = await download_files(download_list, task_uuid)
-return comfy_io.NodeOutput(model)
+return IO.NodeOutput(model)
-class Rodin3D_Sketch(comfy_io.ComfyNode):
+class Rodin3D_Sketch(IO.ComfyNode):
 """Generate 3D Assets using Rodin API"""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Rodin3D_Sketch",
 display_name="Rodin 3D Generate - Sketch Generate",
 category="api node/3d/Rodin",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.Image.Input("Images"),
-comfy_io.Int.Input(
+IO.Image.Input("Images"),
+IO.Int.Input(
 "Seed",
 default=0,
 min=0,
 max=65535,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 optional=True,
 ),
 ],
-outputs=[comfy_io.String.Output(display_name="3D Model Path")],
+outputs=[IO.String.Output(display_name="3D Model Path")],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
 ],
 is_api_node=True,
 )
@@ -461,7 +461,7 @@ class Rodin3D_Sketch(comfy_io.ComfyNode):
 cls,
 Images,
 Seed,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 tier = "Sketch"
 num_images = Images.shape[0]
 m_images = []
@@ -487,42 +487,42 @@ class Rodin3D_Sketch(comfy_io.ComfyNode):
 download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth)
 model = await download_files(download_list, task_uuid)
-return comfy_io.NodeOutput(model)
+return IO.NodeOutput(model)
-class Rodin3D_Gen2(comfy_io.ComfyNode):
+class Rodin3D_Gen2(IO.ComfyNode):
 """Generate 3D Assets using Rodin API"""
 @classmethod
-def define_schema(cls) -> comfy_io.Schema:
-return comfy_io.Schema(
+def define_schema(cls) -> IO.Schema:
+return IO.Schema(
 node_id="Rodin3D_Gen2",
 display_name="Rodin 3D Generate - Gen-2 Generate",
 category="api node/3d/Rodin",
 description=cleandoc(cls.__doc__ or ""),
 inputs=[
-comfy_io.Image.Input("Images"),
-comfy_io.Int.Input(
+IO.Image.Input("Images"),
+IO.Int.Input(
 "Seed",
 default=0,
 min=0,
 max=65535,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 optional=True,
 ),
-comfy_io.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True),
-comfy_io.Combo.Input(
+IO.Combo.Input("Material_Type", options=["PBR", "Shaded"], default="PBR", optional=True),
+IO.Combo.Input(
 "Polygon_count",
 options=["4K-Quad", "8K-Quad", "18K-Quad", "50K-Quad", "2K-Triangle", "20K-Triangle", "150K-Triangle", "500K-Triangle"],
 default="500K-Triangle",
 optional=True,
 ),
-comfy_io.Boolean.Input("TAPose", default=False),
+IO.Boolean.Input("TAPose", default=False),
 ],
-outputs=[comfy_io.String.Output(display_name="3D Model Path")],
+outputs=[IO.String.Output(display_name="3D Model Path")],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
 ],
 is_api_node=True,
 )
@@ -535,7 +535,7 @@ class Rodin3D_Gen2(comfy_io.ComfyNode):
 Material_Type,
 Polygon_count,
 TAPose,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 tier = "Gen-2"
 num_images = Images.shape[0]
 m_images = []
@@ -560,12 +560,12 @@ class Rodin3D_Gen2(comfy_io.ComfyNode):
 download_list = await get_rodin_download_list(task_uuid, auth_kwargs=auth)
 model = await download_files(download_list, task_uuid)
-return comfy_io.NodeOutput(model)
+return IO.NodeOutput(model)
 class Rodin3DExtension(ComfyExtension):
 @override
-async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+async def get_node_list(self) -> list[type[IO.ComfyNode]]:
 return [
 Rodin3D_Regular,
 Rodin3D_Detail,
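
All five Rodin nodes share one execute skeleton: batch the input images, submit a generation task for the chosen tier, poll until a download list is ready (get_rodin_download_list), fetch the files (download_files), and return the local model path through IO.NodeOutput. The repository's own polling helpers are not visible in these hunks, so purely as an illustration of the shape of that loop, a generic poll-until-ready coroutine might look like this; the function name, interval, and timeout are hypothetical:

import asyncio
from typing import Awaitable, Callable, Optional, TypeVar

T = TypeVar("T")


async def poll_until_ready(
    fetch: Callable[[], Awaitable[Optional[T]]],
    interval_s: float = 5.0,
    timeout_s: float = 600.0,
) -> T:
    """Call fetch() until it yields a value, sleeping between attempts."""
    loop = asyncio.get_running_loop()
    deadline = loop.time() + timeout_s
    while loop.time() < deadline:
        result = await fetch()
        if result is not None:
            return result
        await asyncio.sleep(interval_s)
    raise TimeoutError("generation task did not finish before the timeout")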

@@ -48,7 +48,7 @@ from comfy_api_nodes.apinode_utils import (
 download_url_to_image_tensor,
 )
 from comfy_api.input_impl import VideoFromFile
-from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api.latest import ComfyExtension, IO
 from comfy_api_nodes.util.validation_utils import validate_image_dimensions, validate_image_aspect_ratio
 PATH_IMAGE_TO_VIDEO = "/proxy/runway/image_to_video"
@@ -175,11 +175,11 @@ async def generate_video(
 return await download_url_to_video_output(video_url)
-class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode):
+class RunwayImageToVideoNodeGen3a(IO.ComfyNode):
 @classmethod
 def define_schema(cls):
-return comfy_io.Schema(
+return IO.Schema(
 node_id="RunwayImageToVideoNodeGen3a",
 display_name="Runway Image to Video (Gen3a Turbo)",
 category="api node/video/Runway",
@@ -188,42 +188,42 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode):
 "your input selections will set your generation up for success: "
 "https://help.runwayml.com/hc/en-us/articles/33927968552339-Creating-with-Act-One-on-Gen-3-Alpha-and-Turbo.",
 inputs=[
-comfy_io.String.Input(
+IO.String.Input(
 "prompt",
 multiline=True,
 default="",
 tooltip="Text prompt for the generation",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "start_frame",
 tooltip="Start frame to be used for the video",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "duration",
 options=Duration,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "ratio",
 options=RunwayGen3aAspectRatio,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "seed",
 default=0,
 min=0,
 max=4294967295,
 step=1,
 control_after_generate=True,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 tooltip="Random seed for generation",
 ),
 ],
 outputs=[
-comfy_io.Video.Output(),
+IO.Video.Output(),
 ],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -236,7 +236,7 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode):
 duration: str,
 ratio: str,
 seed: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_string(prompt, min_length=1)
 validate_image_dimensions(start_frame, max_width=7999, max_height=7999)
 validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0)
@@ -253,7 +253,7 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode):
 auth_kwargs=auth_kwargs,
 )
-return comfy_io.NodeOutput(
+return IO.NodeOutput(
 await generate_video(
 RunwayImageToVideoRequest(
 promptText=prompt,
@@ -275,11 +275,11 @@ class RunwayImageToVideoNodeGen3a(comfy_io.ComfyNode):
 )
-class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode):
+class RunwayImageToVideoNodeGen4(IO.ComfyNode):
 @classmethod
 def define_schema(cls):
-return comfy_io.Schema(
+return IO.Schema(
 node_id="RunwayImageToVideoNodeGen4",
 display_name="Runway Image to Video (Gen4 Turbo)",
 category="api node/video/Runway",
@@ -288,42 +288,42 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode):
 "your input selections will set your generation up for success: "
 "https://help.runwayml.com/hc/en-us/articles/37327109429011-Creating-with-Gen-4-Video.",
 inputs=[
-comfy_io.String.Input(
+IO.String.Input(
 "prompt",
 multiline=True,
 default="",
 tooltip="Text prompt for the generation",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "start_frame",
 tooltip="Start frame to be used for the video",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "duration",
 options=Duration,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "ratio",
 options=RunwayGen4TurboAspectRatio,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "seed",
 default=0,
 min=0,
 max=4294967295,
 step=1,
 control_after_generate=True,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 tooltip="Random seed for generation",
 ),
 ],
 outputs=[
-comfy_io.Video.Output(),
+IO.Video.Output(),
 ],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -336,7 +336,7 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode):
 duration: str,
 ratio: str,
 seed: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_string(prompt, min_length=1)
 validate_image_dimensions(start_frame, max_width=7999, max_height=7999)
 validate_image_aspect_ratio(start_frame, min_aspect_ratio=0.5, max_aspect_ratio=2.0)
@@ -353,7 +353,7 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode):
 auth_kwargs=auth_kwargs,
 )
-return comfy_io.NodeOutput(
+return IO.NodeOutput(
 await generate_video(
 RunwayImageToVideoRequest(
 promptText=prompt,
@@ -376,11 +376,11 @@ class RunwayImageToVideoNodeGen4(comfy_io.ComfyNode):
 )
-class RunwayFirstLastFrameNode(comfy_io.ComfyNode):
+class RunwayFirstLastFrameNode(IO.ComfyNode):
 @classmethod
 def define_schema(cls):
-return comfy_io.Schema(
+return IO.Schema(
 node_id="RunwayFirstLastFrameNode",
 display_name="Runway First-Last-Frame to Video",
 category="api node/video/Runway",
@@ -392,46 +392,46 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode):
 "will set your generation up for success: "
 "https://help.runwayml.com/hc/en-us/articles/34170748696595-Creating-with-Keyframes-on-Gen-3.",
 inputs=[
-comfy_io.String.Input(
+IO.String.Input(
 "prompt",
 multiline=True,
 default="",
 tooltip="Text prompt for the generation",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "start_frame",
 tooltip="Start frame to be used for the video",
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "end_frame",
 tooltip="End frame to be used for the video. Supported for gen3a_turbo only.",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "duration",
 options=Duration,
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "ratio",
 options=RunwayGen3aAspectRatio,
 ),
-comfy_io.Int.Input(
+IO.Int.Input(
 "seed",
 default=0,
 min=0,
 max=4294967295,
 step=1,
 control_after_generate=True,
-display_mode=comfy_io.NumberDisplay.number,
+display_mode=IO.NumberDisplay.number,
 tooltip="Random seed for generation",
 ),
 ],
 outputs=[
-comfy_io.Video.Output(),
+IO.Video.Output(),
 ],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -445,7 +445,7 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode):
 duration: str,
 ratio: str,
 seed: int,
-) -> comfy_io.NodeOutput:
+) -> IO.NodeOutput:
 validate_string(prompt, min_length=1)
 validate_image_dimensions(start_frame, max_width=7999, max_height=7999)
 validate_image_dimensions(end_frame, max_width=7999, max_height=7999)
@@ -467,7 +467,7 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode):
 if len(download_urls) != 2:
 raise RunwayApiError("Failed to upload one or more images to comfy api.")
-return comfy_io.NodeOutput(
+return IO.NodeOutput(
 await generate_video(
 RunwayImageToVideoRequest(
 promptText=prompt,
@@ -493,40 +493,40 @@ class RunwayFirstLastFrameNode(comfy_io.ComfyNode):
 )
-class RunwayTextToImageNode(comfy_io.ComfyNode):
+class RunwayTextToImageNode(IO.ComfyNode):
 @classmethod
 def define_schema(cls):
-return comfy_io.Schema(
+return IO.Schema(
 node_id="RunwayTextToImageNode",
 display_name="Runway Text to Image",
 category="api node/image/Runway",
 description="Generate an image from a text prompt using Runway's Gen 4 model. "
 "You can also include reference image to guide the generation.",
 inputs=[
-comfy_io.String.Input(
+IO.String.Input(
 "prompt",
 multiline=True,
 default="",
 tooltip="Text prompt for the generation",
 ),
-comfy_io.Combo.Input(
+IO.Combo.Input(
 "ratio",
 options=[model.value for model in RunwayTextToImageAspectRatioEnum],
 ),
-comfy_io.Image.Input(
+IO.Image.Input(
 "reference_image",
 tooltip="Optional reference image to guide the generation",
 optional=True,
 ),
 ],
 outputs=[
-comfy_io.Image.Output(),
+IO.Image.Output(),
 ],
 hidden=[
-comfy_io.Hidden.auth_token_comfy_org,
-comfy_io.Hidden.api_key_comfy_org,
-comfy_io.Hidden.unique_id,
+IO.Hidden.auth_token_comfy_org,
+IO.Hidden.api_key_comfy_org,
+IO.Hidden.unique_id,
 ],
 is_api_node=True,
 )
@@ -537,7 +537,7 @@ class RunwayTextToImageNode(comfy_io.ComfyNode):
prompt: str, prompt: str,
ratio: str, ratio: str,
reference_image: Optional[torch.Tensor] = None, reference_image: Optional[torch.Tensor] = None,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
validate_string(prompt, min_length=1) validate_string(prompt, min_length=1)
auth_kwargs = { auth_kwargs = {
@ -588,12 +588,12 @@ class RunwayTextToImageNode(comfy_io.ComfyNode):
if not final_response.output: if not final_response.output:
raise RunwayApiError("Runway task succeeded but no image data found in response.") raise RunwayApiError("Runway task succeeded but no image data found in response.")
return comfy_io.NodeOutput(await download_url_to_image_tensor(get_image_url_from_task_status(final_response))) return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_task_status(final_response)))
class RunwayExtension(ComfyExtension): class RunwayExtension(ComfyExtension):
@override @override
async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]: async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [ return [
RunwayFirstLastFrameNode, RunwayFirstLastFrameNode,
RunwayImageToVideoNodeGen3a, RunwayImageToVideoNodeGen3a,
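
Taken together, these hunks are purely mechanical: every `comfy_io.*` reference becomes `IO.*` while the schemas, inputs, and outputs stay byte-for-byte identical. For extension authors, a node written against the new alias reads like the minimal sketch below; the node id, category, and behavior are hypothetical illustrations (not part of this commit), and registration plumbing is omitted.

from typing_extensions import override
from comfy_api.latest import ComfyExtension, IO


class EchoPromptNode(IO.ComfyNode):  # hypothetical example node
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="EchoPromptNode",  # hypothetical id
            display_name="Echo Prompt",
            category="example",
            inputs=[IO.String.Input("prompt", multiline=True, default="")],
            outputs=[IO.String.Output()],
        )

    @classmethod
    def execute(cls, prompt: str) -> IO.NodeOutput:
        # Trivial body: pass the input string straight through as the output.
        return IO.NodeOutput(prompt)


class EchoExtension(ComfyExtension):  # hypothetical extension wrapper
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [EchoPromptNode]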

View File

@@ -3,7 +3,7 @@ from typing_extensions import override

import torch
from pydantic import BaseModel, Field

-from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api.latest import ComfyExtension, IO
from comfy_api_nodes.apis.client import (
    ApiEndpoint,
    HttpMethod,
@@ -31,27 +31,27 @@ class Sora2GenerationResponse(BaseModel):
    status: Optional[str] = Field(None)


-class OpenAIVideoSora2(comfy_io.ComfyNode):
+class OpenAIVideoSora2(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="OpenAIVideoSora2",
            display_name="OpenAI Sora - Video",
            category="api node/video/Sora",
            description="OpenAI video and audio generation.",
            inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "model",
                    options=["sora-2", "sora-2-pro"],
                    default="sora-2",
                ),
-                comfy_io.String.Input(
+                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Guiding text; may be empty if an input image is present.",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "size",
                    options=[
                        "720x1280",
@@ -61,22 +61,22 @@ class OpenAIVideoSora2(comfy_io.ComfyNode):
                    ],
                    default="1280x720",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "duration",
                    options=[4, 8, 12],
                    default=8,
                ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                    "image",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    optional=True,
                    tooltip="Seed to determine if node should re-run; "
@@ -84,12 +84,12 @@ class OpenAIVideoSora2(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -155,7 +155,7 @@ class OpenAIVideoSora2(comfy_io.ComfyNode):
            estimated_duration=45 * (duration / 4) * model_time_multiplier,
        )
        await poll_operation.execute()
-        return comfy_io.NodeOutput(
+        return IO.NodeOutput(
            await download_url_to_video_output(
                f"/proxy/openai/v1/videos/{initial_response.id}/content",
                auth_kwargs=auth,
@@ -165,7 +165,7 @@ class OpenAIVideoSora2(comfy_io.ComfyNode):

class OpenAISoraExtension(ComfyExtension):
    @override
-    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            OpenAIVideoSora2,
        ]
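
The tail of the Sora node shows the pattern every async api node in these files follows: submit the remote task, poll it to completion, then download the artifact and wrap it in `IO.NodeOutput`. A rough sketch of that flow; `create_task`, `poll_until_done`, and `fetch_video` are hypothetical stand-ins for the SynchronousOperation/PollingOperation plumbing, not real helpers from this codebase.

from comfy_api.latest import IO


async def run_video_task(create_task, poll_until_done, fetch_video) -> IO.NodeOutput:
    # Hypothetical stand-ins: create_task submits the generation request,
    # poll_until_done blocks until the remote job reports completion, and
    # fetch_video downloads the result (for Sora, via the /content proxy path).
    task = await create_task()
    await poll_until_done(task.id)
    video = await fetch_video(f"/proxy/openai/v1/videos/{task.id}/content")
    return IO.NodeOutput(video)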

View File

@@ -2,7 +2,7 @@ from inspect import cleandoc

from typing import Optional
from typing_extensions import override

-from comfy_api.latest import ComfyExtension, Input, io as comfy_io
+from comfy_api.latest import ComfyExtension, Input, IO
from comfy_api_nodes.apis.stability_api import (
    StabilityUpscaleConservativeRequest,
    StabilityUpscaleCreativeRequest,
@@ -56,20 +56,20 @@ def get_async_dummy_status(x: StabilityResultsGetResponse):
    return StabilityPollStatus.in_progress


-class StabilityStableImageUltraNode(comfy_io.ComfyNode):
+class StabilityStableImageUltraNode(IO.ComfyNode):
    """
    Generates images synchronously based on prompt and resolution.
    """

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityStableImageUltraNode",
            display_name="Stability AI Stable Image Ultra",
            category="api node/image/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.String.Input(
+                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
@@ -80,39 +80,39 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode):
                    "is a value between 0 and 1. For example: `The sky was a crisp (blue:0.3) and (green:0.8)`" +
                    "would convey a sky that was blue and green, but more green than blue.",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "aspect_ratio",
                    options=StabilityAspectRatio,
                    default=StabilityAspectRatio.ratio_1_1,
                    tooltip="Aspect ratio of generated image.",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "style_preset",
                    options=get_stability_style_presets(),
                    tooltip="Optional desired style of generated image.",
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967294,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                    "image",
                    optional=True,
                ),
-                comfy_io.String.Input(
+                IO.String.Input(
                    "negative_prompt",
                    default="",
                    tooltip="A blurb of text describing what you do not wish to see in the output image. This is an advanced feature.",
                    force_input=True,
                    optional=True,
                ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                    "image_denoise",
                    default=0.5,
                    min=0.0,
@@ -123,12 +123,12 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -143,7 +143,7 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode):
        image: Optional[torch.Tensor] = None,
        negative_prompt: str = "",
        image_denoise: Optional[float] = 0.5,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
        validate_string(prompt, strip_whitespace=False)
        # prepare image binary if image present
        image_binary = None
@@ -193,44 +193,44 @@ class StabilityStableImageUltraNode(comfy_io.ComfyNode):
        image_data = base64.b64decode(response_api.image)
        returned_image = bytesio_to_image_tensor(BytesIO(image_data))
-        return comfy_io.NodeOutput(returned_image)
+        return IO.NodeOutput(returned_image)


-class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode):
+class StabilityStableImageSD_3_5Node(IO.ComfyNode):
    """
    Generates images synchronously based on prompt and resolution.
    """

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityStableImageSD_3_5Node",
            display_name="Stability AI Stable Diffusion 3.5 Image",
            category="api node/image/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.String.Input(
+                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "model",
                    options=Stability_SD3_5_Model,
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "aspect_ratio",
                    options=StabilityAspectRatio,
                    default=StabilityAspectRatio.ratio_1_1,
                    tooltip="Aspect ratio of generated image.",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "style_preset",
                    options=get_stability_style_presets(),
                    tooltip="Optional desired style of generated image.",
                ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                    "cfg_scale",
                    default=4.0,
                    min=1.0,
@@ -238,28 +238,28 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode):
                    step=0.1,
                    tooltip="How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)",
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967294,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                    "image",
                    optional=True,
                ),
-                comfy_io.String.Input(
+                IO.String.Input(
                    "negative_prompt",
                    default="",
                    tooltip="Keywords of what you do not wish to see in the output image. This is an advanced feature.",
                    force_input=True,
                    optional=True,
                ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                    "image_denoise",
                    default=0.5,
                    min=0.0,
@@ -270,12 +270,12 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -292,7 +292,7 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode):
        image: Optional[torch.Tensor] = None,
        negative_prompt: str = "",
        image_denoise: Optional[float] = 0.5,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
        validate_string(prompt, strip_whitespace=False)
        # prepare image binary if image present
        image_binary = None
@@ -348,30 +348,30 @@ class StabilityStableImageSD_3_5Node(comfy_io.ComfyNode):
        image_data = base64.b64decode(response_api.image)
        returned_image = bytesio_to_image_tensor(BytesIO(image_data))
-        return comfy_io.NodeOutput(returned_image)
+        return IO.NodeOutput(returned_image)


-class StabilityUpscaleConservativeNode(comfy_io.ComfyNode):
+class StabilityUpscaleConservativeNode(IO.ComfyNode):
    """
    Upscale image with minimal alterations to 4K resolution.
    """

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityUpscaleConservativeNode",
            display_name="Stability AI Upscale Conservative",
            category="api node/image/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.Image.Input("image"),
-                comfy_io.String.Input(
+                IO.Image.Input("image"),
+                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.",
                ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                    "creativity",
                    default=0.35,
                    min=0.2,
@@ -379,17 +379,17 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode):
                    step=0.01,
                    tooltip="Controls the likelihood of creating additional details not heavily conditioned by the init image.",
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967294,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
-                comfy_io.String.Input(
+                IO.String.Input(
                    "negative_prompt",
                    default="",
                    tooltip="Keywords of what you do not wish to see in the output image. This is an advanced feature.",
@@ -398,12 +398,12 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -416,7 +416,7 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode):
        creativity: float,
        seed: int,
        negative_prompt: str = "",
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
        validate_string(prompt, strip_whitespace=False)
        image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read()
@@ -457,30 +457,30 @@ class StabilityUpscaleConservativeNode(comfy_io.ComfyNode):
        image_data = base64.b64decode(response_api.image)
        returned_image = bytesio_to_image_tensor(BytesIO(image_data))
-        return comfy_io.NodeOutput(returned_image)
+        return IO.NodeOutput(returned_image)


-class StabilityUpscaleCreativeNode(comfy_io.ComfyNode):
+class StabilityUpscaleCreativeNode(IO.ComfyNode):
    """
    Upscale image with minimal alterations to 4K resolution.
    """

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityUpscaleCreativeNode",
            display_name="Stability AI Upscale Creative",
            category="api node/image/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.Image.Input("image"),
-                comfy_io.String.Input(
+                IO.Image.Input("image"),
+                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="What you wish to see in the output image. A strong, descriptive prompt that clearly defines elements, colors, and subjects will lead to better results.",
                ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                    "creativity",
                    default=0.3,
                    min=0.1,
@@ -488,22 +488,22 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode):
                    step=0.01,
                    tooltip="Controls the likelihood of creating additional details not heavily conditioned by the init image.",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "style_preset",
                    options=get_stability_style_presets(),
                    tooltip="Optional desired style of generated image.",
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967294,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
-                comfy_io.String.Input(
+                IO.String.Input(
                    "negative_prompt",
                    default="",
                    tooltip="Keywords of what you do not wish to see in the output image. This is an advanced feature.",
@@ -512,12 +512,12 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -531,7 +531,7 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode):
        style_preset: str,
        seed: int,
        negative_prompt: str = "",
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
        validate_string(prompt, strip_whitespace=False)
        image_binary = tensor_to_bytesio(image, total_pixels=1024*1024).read()
@@ -591,37 +591,37 @@ class StabilityUpscaleCreativeNode(comfy_io.ComfyNode):
        image_data = base64.b64decode(response_poll.result)
        returned_image = bytesio_to_image_tensor(BytesIO(image_data))
-        return comfy_io.NodeOutput(returned_image)
+        return IO.NodeOutput(returned_image)


-class StabilityUpscaleFastNode(comfy_io.ComfyNode):
+class StabilityUpscaleFastNode(IO.ComfyNode):
    """
    Quickly upscales an image via Stability API call to 4x its original size; intended for upscaling low-quality/compressed images.
    """

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityUpscaleFastNode",
            display_name="Stability AI Upscale Fast",
            category="api node/image/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.Image.Input("image"),
+                IO.Image.Input("image"),
            ],
            outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
-    async def execute(cls, image: torch.Tensor) -> comfy_io.NodeOutput:
+    async def execute(cls, image: torch.Tensor) -> IO.NodeOutput:
        image_binary = tensor_to_bytesio(image, total_pixels=4096*4096).read()

        files = {
@@ -653,26 +653,26 @@ class StabilityUpscaleFastNode(comfy_io.ComfyNode):
        image_data = base64.b64decode(response_api.image)
        returned_image = bytesio_to_image_tensor(BytesIO(image_data))
-        return comfy_io.NodeOutput(returned_image)
+        return IO.NodeOutput(returned_image)


-class StabilityTextToAudio(comfy_io.ComfyNode):
+class StabilityTextToAudio(IO.ComfyNode):
    """Generates high-quality music and sound effects from text descriptions."""

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityTextToAudio",
            display_name="Stability AI Text To Audio",
            category="api node/audio/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "model",
                    options=["stable-audio-2.5"],
                ),
-                comfy_io.String.Input("prompt", multiline=True, default=""),
-                comfy_io.Int.Input(
+                IO.String.Input("prompt", multiline=True, default=""),
+                IO.Int.Input(
                    "duration",
                    default=190,
                    min=1,
@@ -681,18 +681,18 @@ class StabilityTextToAudio(comfy_io.ComfyNode):
                    tooltip="Controls the duration in seconds of the generated audio.",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967294,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="The random seed used for generation.",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "steps",
                    default=8,
                    min=4,
@@ -703,18 +703,18 @@ class StabilityTextToAudio(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Audio.Output(),
+                IO.Audio.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )

    @classmethod
-    async def execute(cls, model: str, prompt: str, duration: int, seed: int, steps: int) -> comfy_io.NodeOutput:
+    async def execute(cls, model: str, prompt: str, duration: int, seed: int, steps: int) -> IO.NodeOutput:
        validate_string(prompt, max_length=10000)
        payload = StabilityTextToAudioRequest(prompt=prompt, model=model, duration=duration, seed=seed, steps=steps)
        operation = SynchronousOperation(
@@ -734,27 +734,27 @@ class StabilityTextToAudio(comfy_io.ComfyNode):
        response_api = await operation.execute()
        if not response_api.audio:
            raise ValueError("No audio file was received in response.")
-        return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))
+        return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))


-class StabilityAudioToAudio(comfy_io.ComfyNode):
+class StabilityAudioToAudio(IO.ComfyNode):
    """Transforms existing audio samples into new high-quality compositions using text instructions."""

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityAudioToAudio",
            display_name="Stability AI Audio To Audio",
            category="api node/audio/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "model",
                    options=["stable-audio-2.5"],
                ),
-                comfy_io.String.Input("prompt", multiline=True, default=""),
-                comfy_io.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."),
-                comfy_io.Int.Input(
+                IO.String.Input("prompt", multiline=True, default=""),
+                IO.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."),
+                IO.Int.Input(
                    "duration",
                    default=190,
                    min=1,
@@ -763,18 +763,18 @@ class StabilityAudioToAudio(comfy_io.ComfyNode):
                    tooltip="Controls the duration in seconds of the generated audio.",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967294,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="The random seed used for generation.",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "steps",
                    default=8,
                    min=4,
@@ -783,24 +783,24 @@ class StabilityAudioToAudio(comfy_io.ComfyNode):
                    tooltip="Controls the number of sampling steps.",
                    optional=True,
                ),
-                comfy_io.Float.Input(
+                IO.Float.Input(
                    "strength",
                    default=1,
                    min=0.01,
                    max=1.0,
                    step=0.01,
-                    display_mode=comfy_io.NumberDisplay.slider,
+                    display_mode=IO.NumberDisplay.slider,
                    tooltip="Parameter controls how much influence the audio parameter has on the generated audio.",
                    optional=True,
                ),
            ],
            outputs=[
-                comfy_io.Audio.Output(),
+                IO.Audio.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -808,7 +808,7 @@ class StabilityAudioToAudio(comfy_io.ComfyNode):
    @classmethod
    async def execute(
        cls, model: str, prompt: str, audio: Input.Audio, duration: int, seed: int, steps: int, strength: float
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
        validate_string(prompt, max_length=10000)
        validate_audio_duration(audio, 6, 190)

        payload = StabilityAudioToAudioRequest(
@@ -832,27 +832,27 @@ class StabilityAudioToAudio(comfy_io.ComfyNode):
        response_api = await operation.execute()
        if not response_api.audio:
            raise ValueError("No audio file was received in response.")
-        return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))
+        return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))


-class StabilityAudioInpaint(comfy_io.ComfyNode):
+class StabilityAudioInpaint(IO.ComfyNode):
    """Transforms part of existing audio sample using text instructions."""

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="StabilityAudioInpaint",
            display_name="Stability AI Audio Inpaint",
            category="api node/audio/Stability AI",
            description=cleandoc(cls.__doc__ or ""),
            inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "model",
                    options=["stable-audio-2.5"],
                ),
-                comfy_io.String.Input("prompt", multiline=True, default=""),
-                comfy_io.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."),
-                comfy_io.Int.Input(
+                IO.String.Input("prompt", multiline=True, default=""),
+                IO.Audio.Input("audio", tooltip="Audio must be between 6 and 190 seconds long."),
+                IO.Int.Input(
                    "duration",
                    default=190,
                    min=1,
@@ -861,18 +861,18 @@ class StabilityAudioInpaint(comfy_io.ComfyNode):
                    tooltip="Controls the duration in seconds of the generated audio.",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=4294967294,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="The random seed used for generation.",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "steps",
                    default=8,
                    min=4,
@@ -881,7 +881,7 @@ class StabilityAudioInpaint(comfy_io.ComfyNode):
                    tooltip="Controls the number of sampling steps.",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "mask_start",
                    default=30,
                    min=0,
@@ -889,7 +889,7 @@ class StabilityAudioInpaint(comfy_io.ComfyNode):
                    step=1,
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "mask_end",
                    default=190,
                    min=0,
@@ -899,12 +899,12 @@ class StabilityAudioInpaint(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Audio.Output(),
+                IO.Audio.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -920,7 +920,7 @@ class StabilityAudioInpaint(comfy_io.ComfyNode):
        steps: int,
        mask_start: int,
        mask_end: int,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
        validate_string(prompt, max_length=10000)
        if mask_end <= mask_start:
            raise ValueError(f"Value of mask_end({mask_end}) should be greater then mask_start({mask_start})")
@@ -953,12 +953,12 @@ class StabilityAudioInpaint(comfy_io.ComfyNode):
        response_api = await operation.execute()
        if not response_api.audio:
            raise ValueError("No audio file was received in response.")
-        return comfy_io.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))
+        return IO.NodeOutput(audio_bytes_to_audio_input(base64.b64decode(response_api.audio)))


class StabilityExtension(ComfyExtension):
    @override
-    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            StabilityStableImageUltraNode,
            StabilityStableImageSD_3_5Node,
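
One behavioral detail worth noting amid the rename: StabilityAudioInpaint validates its mask window before any request is built, rejecting an empty or inverted window outright. A standalone sketch of that guard follows; the duration bound is an assumption added for illustration, since the hunk above only shows the ordering check.

def validate_mask_window(mask_start: int, mask_end: int, duration: int = 190) -> None:
    # Mirrors the node's guard: the inpainted window must be non-empty.
    if mask_end <= mask_start:
        raise ValueError(f"Value of mask_end({mask_end}) should be greater than mask_start({mask_start})")
    # Assumed extra check for illustration: keep the window inside the clip.
    if mask_end > duration:
        raise ValueError(f"mask_end({mask_end}) cannot exceed duration({duration})")


validate_mask_window(30, 190)    # passes
# validate_mask_window(120, 90)  # would raise ValueError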

View File

@@ -6,7 +6,7 @@ from io import BytesIO

from typing import Optional
from typing_extensions import override

-from comfy_api.latest import ComfyExtension, io as comfy_io
+from comfy_api.latest import ComfyExtension, IO
from comfy_api.input_impl.video_types import VideoFromFile
from comfy_api_nodes.apis import (
    VeoGenVidRequest,
@@ -51,7 +51,7 @@ def get_video_url_from_response(poll_response: VeoGenVidPollResponse) -> Optiona
    return None


-class VeoVideoGenerationNode(comfy_io.ComfyNode):
+class VeoVideoGenerationNode(IO.ComfyNode):
    """
    Generates videos from text prompts using Google's Veo API.
@@ -61,71 +61,71 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode):

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="VeoVideoGenerationNode",
            display_name="Google Veo 2 Video Generation",
            category="api node/video/Veo",
            description="Generates videos from text prompts using Google's Veo 2 API",
            inputs=[
-                comfy_io.String.Input(
+                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Text description of the video",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "aspect_ratio",
                    options=["16:9", "9:16"],
                    default="16:9",
                    tooltip="Aspect ratio of the output video",
                ),
-                comfy_io.String.Input(
+                IO.String.Input(
                    "negative_prompt",
                    multiline=True,
                    default="",
                    tooltip="Negative text prompt to guide what to avoid in the video",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "duration_seconds",
                    default=5,
                    min=5,
                    max=8,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    tooltip="Duration of the output video in seconds",
                    optional=True,
                ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                    "enhance_prompt",
                    default=True,
                    tooltip="Whether to enhance the prompt with AI assistance",
                    optional=True,
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "person_generation",
                    options=["ALLOW", "BLOCK"],
                    default="ALLOW",
                    tooltip="Whether to allow generating people in the video",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=0xFFFFFFFF,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed for video generation (0 for random)",
                    optional=True,
                ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                    "image",
                    tooltip="Optional reference image to guide video generation",
                    optional=True,
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "model",
                    options=["veo-2.0-generate-001"],
                    default="veo-2.0-generate-001",
@@ -134,12 +134,12 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode):
                ),
            ],
            outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -302,7 +302,7 @@ class VeoVideoGenerationNode(comfy_io.ComfyNode):
        video_io = BytesIO(video_data)

        # Return VideoFromFile object
-        return comfy_io.NodeOutput(VideoFromFile(video_io))
+        return IO.NodeOutput(VideoFromFile(video_io))


class Veo3VideoGenerationNode(VeoVideoGenerationNode):
@@ -319,78 +319,78 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):

    @classmethod
    def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
            node_id="Veo3VideoGenerationNode",
            display_name="Google Veo 3 Video Generation",
            category="api node/video/Veo",
            description="Generates videos from text prompts using Google's Veo 3 API",
            inputs=[
-                comfy_io.String.Input(
+                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Text description of the video",
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "aspect_ratio",
                    options=["16:9", "9:16"],
                    default="16:9",
                    tooltip="Aspect ratio of the output video",
                ),
-                comfy_io.String.Input(
+                IO.String.Input(
                    "negative_prompt",
                    multiline=True,
                    default="",
                    tooltip="Negative text prompt to guide what to avoid in the video",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "duration_seconds",
                    default=8,
                    min=8,
                    max=8,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    tooltip="Duration of the output video in seconds (Veo 3 only supports 8 seconds)",
                    optional=True,
                ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                    "enhance_prompt",
                    default=True,
                    tooltip="Whether to enhance the prompt with AI assistance",
                    optional=True,
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "person_generation",
                    options=["ALLOW", "BLOCK"],
                    default="ALLOW",
                    tooltip="Whether to allow generating people in the video",
                    optional=True,
                ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=0xFFFFFFFF,
                    step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed for video generation (0 for random)",
                    optional=True,
                ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                    "image",
                    tooltip="Optional reference image to guide video generation",
                    optional=True,
                ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                    "model",
                    options=["veo-3.0-generate-001", "veo-3.0-fast-generate-001"],
                    default="veo-3.0-generate-001",
                    tooltip="Veo 3 model to use for video generation",
                    optional=True,
                ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                    "generate_audio",
                    default=False,
                    tooltip="Generate audio for the video. Supported by all Veo 3 models.",
@@ -398,12 +398,12 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
                ),
            ],
            outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
            ],
            hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
            ],
            is_api_node=True,
        )
@@ -411,7 +411,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):

class VeoExtension(ComfyExtension):
    @override
-    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            VeoVideoGenerationNode,
            Veo3VideoGenerationNode,
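
These hunks also show why the rename touches Veo twice: Veo3VideoGenerationNode subclasses VeoVideoGenerationNode and overrides only define_schema (swapping in the Veo 3 model list and adding the generate_audio toggle), so the inherited execute serves both generations. A schematic of that pattern; the class names and node ids below are hypothetical, not the actual Veo code.

from comfy_api.latest import IO


class BaseVideoNode(IO.ComfyNode):  # plays the role of VeoVideoGenerationNode
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="BaseVideo",  # hypothetical id
            display_name="Base Video",
            category="example",
            inputs=[IO.String.Input("prompt", multiline=True, default="")],
            outputs=[IO.Video.Output()],
        )

    @classmethod
    async def execute(cls, prompt: str, **kwargs) -> IO.NodeOutput:
        ...  # shared generation logic; inherited unchanged by the subclass


class ExtendedVideoNode(BaseVideoNode):  # plays the role of Veo3VideoGenerationNode
    @classmethod
    def define_schema(cls):
        # A fresh Schema that widens the parent's inputs, mirroring how the
        # Veo 3 node redeclares its schema with the extra boolean input.
        return IO.Schema(
            node_id="ExtendedVideo",  # hypothetical id
            display_name="Extended Video",
            category="example",
            inputs=[
                IO.String.Input("prompt", multiline=True, default=""),
                IO.Boolean.Input("generate_audio", default=False, optional=True),
            ],
            outputs=[IO.Video.Output()],
        )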

View File

@ -6,7 +6,7 @@ from typing_extensions import override
import torch import torch
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from comfy_api.latest import ComfyExtension, io as comfy_io from comfy_api.latest import ComfyExtension, IO
from comfy_api_nodes.util.validation_utils import ( from comfy_api_nodes.util.validation_utils import (
validate_aspect_ratio_closeness, validate_aspect_ratio_closeness,
validate_image_dimensions, validate_image_dimensions,
@ -161,63 +161,63 @@ async def execute_task(
) )
class ViduTextToVideoNode(comfy_io.ComfyNode): class ViduTextToVideoNode(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return comfy_io.Schema( return IO.Schema(
node_id="ViduTextToVideoNode", node_id="ViduTextToVideoNode",
display_name="Vidu Text To Video Generation", display_name="Vidu Text To Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate video from text prompt", description="Generate video from text prompt",
inputs=[ inputs=[
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=VideoModelName, options=VideoModelName,
default=VideoModelName.vidu_q1, default=VideoModelName.vidu_q1,
tooltip="Model name", tooltip="Model name",
), ),
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
tooltip="A textual description for video generation", tooltip="A textual description for video generation",
), ),
comfy_io.Int.Input( IO.Int.Input(
"duration", "duration",
default=5, default=5,
min=5, min=5,
max=5, max=5,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
tooltip="Duration of the output video in seconds", tooltip="Duration of the output video in seconds",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
max=2147483647, max=2147483647,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
control_after_generate=True, control_after_generate=True,
tooltip="Seed for video generation (0 for random)", tooltip="Seed for video generation (0 for random)",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"aspect_ratio", "aspect_ratio",
options=AspectRatio, options=AspectRatio,
default=AspectRatio.r_16_9, default=AspectRatio.r_16_9,
tooltip="The aspect ratio of the output video", tooltip="The aspect ratio of the output video",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=Resolution, options=Resolution,
default=Resolution.r_1080p, default=Resolution.r_1080p,
tooltip="Supported values may vary by model & duration", tooltip="Supported values may vary by model & duration",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"movement_amplitude", "movement_amplitude",
options=MovementAmplitude, options=MovementAmplitude,
default=MovementAmplitude.auto, default=MovementAmplitude.auto,
@ -226,12 +226,12 @@ class ViduTextToVideoNode(comfy_io.ComfyNode):
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -246,7 +246,7 @@ class ViduTextToVideoNode(comfy_io.ComfyNode):
aspect_ratio: str, aspect_ratio: str,
resolution: str, resolution: str,
movement_amplitude: str, movement_amplitude: str,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
if not prompt: if not prompt:
raise ValueError("The prompt field is required and cannot be empty.") raise ValueError("The prompt field is required and cannot be empty.")
payload = TaskCreationRequest( payload = TaskCreationRequest(
@ -263,65 +263,65 @@ class ViduTextToVideoNode(comfy_io.ComfyNode):
"comfy_api_key": cls.hidden.api_key_comfy_org, "comfy_api_key": cls.hidden.api_key_comfy_org,
} }
results = await execute_task(VIDU_TEXT_TO_VIDEO, auth, payload, 320, cls.hidden.unique_id) results = await execute_task(VIDU_TEXT_TO_VIDEO, auth, payload, 320, cls.hidden.unique_id)
return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url))
class ViduImageToVideoNode(comfy_io.ComfyNode): class ViduImageToVideoNode(IO.ComfyNode):
@classmethod @classmethod
def define_schema(cls): def define_schema(cls):
return comfy_io.Schema( return IO.Schema(
node_id="ViduImageToVideoNode", node_id="ViduImageToVideoNode",
display_name="Vidu Image To Video Generation", display_name="Vidu Image To Video Generation",
category="api node/video/Vidu", category="api node/video/Vidu",
description="Generate video from image and optional prompt", description="Generate video from image and optional prompt",
inputs=[ inputs=[
comfy_io.Combo.Input( IO.Combo.Input(
"model", "model",
options=VideoModelName, options=VideoModelName,
default=VideoModelName.vidu_q1, default=VideoModelName.vidu_q1,
tooltip="Model name", tooltip="Model name",
), ),
comfy_io.Image.Input( IO.Image.Input(
"image", "image",
tooltip="An image to be used as the start frame of the generated video", tooltip="An image to be used as the start frame of the generated video",
), ),
comfy_io.String.Input( IO.String.Input(
"prompt", "prompt",
multiline=True, multiline=True,
default="", default="",
tooltip="A textual description for video generation", tooltip="A textual description for video generation",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"duration", "duration",
default=5, default=5,
min=5, min=5,
max=5, max=5,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
tooltip="Duration of the output video in seconds", tooltip="Duration of the output video in seconds",
optional=True, optional=True,
), ),
comfy_io.Int.Input( IO.Int.Input(
"seed", "seed",
default=0, default=0,
min=0, min=0,
max=2147483647, max=2147483647,
step=1, step=1,
display_mode=comfy_io.NumberDisplay.number, display_mode=IO.NumberDisplay.number,
control_after_generate=True, control_after_generate=True,
tooltip="Seed for video generation (0 for random)", tooltip="Seed for video generation (0 for random)",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"resolution", "resolution",
options=Resolution, options=Resolution,
default=Resolution.r_1080p, default=Resolution.r_1080p,
tooltip="Supported values may vary by model & duration", tooltip="Supported values may vary by model & duration",
optional=True, optional=True,
), ),
comfy_io.Combo.Input( IO.Combo.Input(
"movement_amplitude", "movement_amplitude",
options=MovementAmplitude, options=MovementAmplitude,
default=MovementAmplitude.auto.value, default=MovementAmplitude.auto.value,
@ -330,12 +330,12 @@ class ViduImageToVideoNode(comfy_io.ComfyNode):
), ),
], ],
outputs=[ outputs=[
comfy_io.Video.Output(), IO.Video.Output(),
], ],
hidden=[ hidden=[
comfy_io.Hidden.auth_token_comfy_org, IO.Hidden.auth_token_comfy_org,
comfy_io.Hidden.api_key_comfy_org, IO.Hidden.api_key_comfy_org,
comfy_io.Hidden.unique_id, IO.Hidden.unique_id,
], ],
is_api_node=True, is_api_node=True,
) )
@ -350,7 +350,7 @@ class ViduImageToVideoNode(comfy_io.ComfyNode):
seed: int, seed: int,
resolution: str, resolution: str,
movement_amplitude: str, movement_amplitude: str,
) -> comfy_io.NodeOutput: ) -> IO.NodeOutput:
if get_number_of_images(image) > 1: if get_number_of_images(image) > 1:
raise ValueError("Only one input image is allowed.") raise ValueError("Only one input image is allowed.")
validate_image_aspect_ratio_range(image, (1, 4), (4, 1)) validate_image_aspect_ratio_range(image, (1, 4), (4, 1))
@ -373,70 +373,70 @@ class ViduImageToVideoNode(comfy_io.ComfyNode):
auth_kwargs=auth, auth_kwargs=auth,
) )
results = await execute_task(VIDU_IMAGE_TO_VIDEO, auth, payload, 120, cls.hidden.unique_id) results = await execute_task(VIDU_IMAGE_TO_VIDEO, auth, payload, 120, cls.hidden.unique_id)
return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url)) return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url))

-class ViduReferenceVideoNode(comfy_io.ComfyNode):
+class ViduReferenceVideoNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ViduReferenceVideoNode",
             display_name="Vidu Reference To Video Generation",
             category="api node/video/Vidu",
             description="Generate video from multiple images and prompt",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=VideoModelName,
                     default=VideoModelName.vidu_q1,
                     tooltip="Model name",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "images",
                     tooltip="Images to use as references to generate a video with consistent subjects (max 7 images).",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     tooltip="A textual description for video generation",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=5,
                     max=5,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     tooltip="Duration of the output video in seconds",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed for video generation (0 for random)",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "aspect_ratio",
                     options=AspectRatio,
                     default=AspectRatio.r_16_9,
                     tooltip="The aspect ratio of the output video",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "resolution",
                     options=[model.value for model in Resolution],
                     default=Resolution.r_1080p.value,
                     tooltip="Supported values may vary by model & duration",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "movement_amplitude",
                     options=[model.value for model in MovementAmplitude],
                     default=MovementAmplitude.auto.value,
@@ -445,12 +445,12 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -466,7 +466,7 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode):
         aspect_ratio: str,
         resolution: str,
         movement_amplitude: str,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         if not prompt:
             raise ValueError("The prompt field is required and cannot be empty.")
         a = get_number_of_images(images)
@@ -495,68 +495,68 @@ class ViduReferenceVideoNode(comfy_io.ComfyNode):
             auth_kwargs=auth,
         )
         results = await execute_task(VIDU_REFERENCE_VIDEO, auth, payload, 120, cls.hidden.unique_id)
-        return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url))
+        return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url))

-class ViduStartEndToVideoNode(comfy_io.ComfyNode):
+class ViduStartEndToVideoNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="ViduStartEndToVideoNode",
             display_name="Vidu Start End To Video Generation",
             category="api node/video/Vidu",
             description="Generate a video from start and end frames and a prompt",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=[model.value for model in VideoModelName],
                     default=VideoModelName.vidu_q1.value,
                     tooltip="Model name",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "first_frame",
                     tooltip="Start frame",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "end_frame",
                     tooltip="End frame",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     tooltip="A textual description for video generation",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=5,
                     max=5,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     tooltip="Duration of the output video in seconds",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed for video generation (0 for random)",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "resolution",
                     options=[model.value for model in Resolution],
                     default=Resolution.r_1080p.value,
                     tooltip="Supported values may vary by model & duration",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "movement_amplitude",
                     options=[model.value for model in MovementAmplitude],
                     default=MovementAmplitude.auto.value,
@@ -565,12 +565,12 @@ class ViduStartEndToVideoNode(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -586,7 +586,7 @@ class ViduStartEndToVideoNode(comfy_io.ComfyNode):
         seed: int,
         resolution: str,
         movement_amplitude: str,
-    ) -> comfy_io.NodeOutput:
+    ) -> IO.NodeOutput:
         validate_aspect_ratio_closeness(first_frame, end_frame, min_rel=0.8, max_rel=1.25, strict=False)
         payload = TaskCreationRequest(
             model_name=model,
@@ -605,12 +605,12 @@ class ViduStartEndToVideoNode(comfy_io.ComfyNode):
             for frame in (first_frame, end_frame)
         ]
         results = await execute_task(VIDU_START_END_VIDEO, auth, payload, 96, cls.hidden.unique_id)
-        return comfy_io.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url))
+        return IO.NodeOutput(await download_url_to_video_output(get_video_from_response(results).url))
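The extension class below is what exposes these nodes to the loader. Assuming the module keeps the usual async entry-point convention of these API-node files (the function name is an assumption, not shown in this hunk), registration reduces to:

    async def comfy_entrypoint() -> ViduExtension:
        # ComfyUI calls this to obtain the extension, then awaits get_node_list().
        return ViduExtension()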

 class ViduExtension(ComfyExtension):
     @override
-    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             ViduTextToVideoNode,
             ViduImageToVideoNode,

View File

@@ -4,7 +4,7 @@ from typing_extensions import override
 import torch
 from pydantic import BaseModel, Field
-from comfy_api.latest import ComfyExtension, Input, io as comfy_io
+from comfy_api.latest import ComfyExtension, Input, IO
 from comfy_api_nodes.apis.client import (
     ApiEndpoint,
     HttpMethod,
@@ -195,35 +195,35 @@ async def process_task(
     ).execute()


-class WanTextToImageApi(comfy_io.ComfyNode):
+class WanTextToImageApi(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="WanTextToImageApi",
             display_name="Wan Text to Image",
             category="api node/image/Wan",
             description="Generates image based on text prompt.",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=["wan2.5-t2i-preview"],
                     default="wan2.5-t2i-preview",
                     tooltip="Model to use.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "negative_prompt",
                     multiline=True,
                     default="",
                     tooltip="Negative text prompt to guide what to avoid.",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "width",
                     default=1024,
                     min=768,
@@ -231,7 +231,7 @@ class WanTextToImageApi(comfy_io.ComfyNode):
                     step=32,
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "height",
                     default=1024,
                     min=768,
@@ -239,24 +239,24 @@ class WanTextToImageApi(comfy_io.ComfyNode):
                     step=32,
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_extend",
                     default=True,
                     tooltip="Whether to enhance the prompt with AI assistance.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the result.",
@@ -264,12 +264,12 @@ class WanTextToImageApi(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -309,36 +309,36 @@ class WanTextToImageApi(comfy_io.ComfyNode):
             estimated_duration=9,
             poll_interval=3,
         )
-        return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url)))
+        return IO.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url)))

-class WanImageToImageApi(comfy_io.ComfyNode):
+class WanImageToImageApi(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="WanImageToImageApi",
             display_name="Wan Image to Image",
             category="api node/image/Wan",
             description="Generates an image from one or two input images and a text prompt. "
                         "The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=["wan2.5-i2i-preview"],
                     default="wan2.5-i2i-preview",
                     tooltip="Model to use.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "image",
                     tooltip="Single-image editing or multi-image fusion, maximum 2 images.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "negative_prompt",
                     multiline=True,
                     default="",
@@ -346,7 +346,7 @@ class WanImageToImageApi(comfy_io.ComfyNode):
                     optional=True,
                 ),
                 # redo this later as an optional combo of recommended resolutions
-                # comfy_io.Int.Input(
+                # IO.Int.Input(
                 #     "width",
                 #     default=1280,
                 #     min=384,
@@ -354,7 +354,7 @@ class WanImageToImageApi(comfy_io.ComfyNode):
                 #     step=16,
                 #     optional=True,
                 # ),
-                # comfy_io.Int.Input(
+                # IO.Int.Input(
                 #     "height",
                 #     default=1280,
                 #     min=384,
@@ -362,18 +362,18 @@ class WanImageToImageApi(comfy_io.ComfyNode):
                 #     step=16,
                 #     optional=True,
                 # ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the result.",
@@ -381,12 +381,12 @@ class WanImageToImageApi(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Image.Output(),
+                IO.Image.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -431,38 +431,38 @@ class WanImageToImageApi(comfy_io.ComfyNode):
             estimated_duration=42,
             poll_interval=3,
         )
-        return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url)))
+        return IO.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url)))

-class WanTextToVideoApi(comfy_io.ComfyNode):
+class WanTextToVideoApi(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="WanTextToVideoApi",
             display_name="Wan Text to Video",
             category="api node/video/Wan",
             description="Generates video based on text prompt.",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=["wan2.5-t2v-preview"],
                     default="wan2.5-t2v-preview",
                     tooltip="Model to use.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "negative_prompt",
                     multiline=True,
                     default="",
                     tooltip="Negative text prompt to guide what to avoid.",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "size",
                     options=[
                         "480p: 1:1 (624x624)",
@@ -482,45 +482,45 @@ class WanTextToVideoApi(comfy_io.ComfyNode):
                     default="480p: 1:1 (624x624)",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=5,
                     max=10,
                     step=5,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     tooltip="Available durations: 5 and 10 seconds",
                     optional=True,
                 ),
-                comfy_io.Audio.Input(
+                IO.Audio.Input(
                     "audio",
                     optional=True,
                     tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "generate_audio",
                     default=False,
                     optional=True,
                     tooltip="If there is no audio input, generate audio automatically.",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_extend",
                     default=True,
                     tooltip="Whether to enhance the prompt with AI assistance.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the result.",
@@ -528,12 +528,12 @@ class WanTextToVideoApi(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -582,41 +582,41 @@ class WanTextToVideoApi(comfy_io.ComfyNode):
             estimated_duration=120 * int(duration / 5),
             poll_interval=6,
         )
-        return comfy_io.NodeOutput(await download_url_to_video_output(response.output.video_url))
+        return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
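Note that the polling estimate scales linearly with the requested clip length; working through `estimated_duration=120 * int(duration / 5)` for the two allowed durations:

    # duration=5  -> 120 * int(5 / 5)  = 120 seconds estimated
    # duration=10 -> 120 * int(10 / 5) = 240 seconds estimated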

-class WanImageToVideoApi(comfy_io.ComfyNode):
+class WanImageToVideoApi(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
-        return comfy_io.Schema(
+        return IO.Schema(
             node_id="WanImageToVideoApi",
             display_name="Wan Image to Video",
             category="api node/video/Wan",
             description="Generates video based on the first frame and text prompt.",
             inputs=[
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "model",
                     options=["wan2.5-i2v-preview"],
                     default="wan2.5-i2v-preview",
                     tooltip="Model to use.",
                 ),
-                comfy_io.Image.Input(
+                IO.Image.Input(
                     "image",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "prompt",
                     multiline=True,
                     default="",
                     tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.",
                 ),
-                comfy_io.String.Input(
+                IO.String.Input(
                     "negative_prompt",
                     multiline=True,
                     default="",
                     tooltip="Negative text prompt to guide what to avoid.",
                     optional=True,
                 ),
-                comfy_io.Combo.Input(
+                IO.Combo.Input(
                     "resolution",
                     options=[
                         "480P",
@@ -626,45 +626,45 @@ class WanImageToVideoApi(comfy_io.ComfyNode):
                     default="480P",
                     optional=True,
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "duration",
                     default=5,
                     min=5,
                     max=10,
                     step=5,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     tooltip="Available durations: 5 and 10 seconds",
                     optional=True,
                 ),
-                comfy_io.Audio.Input(
+                IO.Audio.Input(
                     "audio",
                     optional=True,
                     tooltip="Audio must contain a clear, loud voice, without extraneous noise, background music.",
                 ),
-                comfy_io.Int.Input(
+                IO.Int.Input(
                     "seed",
                     default=0,
                     min=0,
                     max=2147483647,
                     step=1,
-                    display_mode=comfy_io.NumberDisplay.number,
+                    display_mode=IO.NumberDisplay.number,
                     control_after_generate=True,
                     tooltip="Seed to use for generation.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "generate_audio",
                     default=False,
                     optional=True,
                     tooltip="If there is no audio input, generate audio automatically.",
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "prompt_extend",
                     default=True,
                     tooltip="Whether to enhance the prompt with AI assistance.",
                     optional=True,
                 ),
-                comfy_io.Boolean.Input(
+                IO.Boolean.Input(
                     "watermark",
                     default=True,
                     tooltip="Whether to add an \"AI generated\" watermark to the result.",
@@ -672,12 +672,12 @@ class WanImageToVideoApi(comfy_io.ComfyNode):
                 ),
             ],
             outputs=[
-                comfy_io.Video.Output(),
+                IO.Video.Output(),
             ],
             hidden=[
-                comfy_io.Hidden.auth_token_comfy_org,
-                comfy_io.Hidden.api_key_comfy_org,
-                comfy_io.Hidden.unique_id,
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
             ],
             is_api_node=True,
         )
@@ -731,12 +731,12 @@ class WanImageToVideoApi(comfy_io.ComfyNode):
             estimated_duration=120 * int(duration / 5),
             poll_interval=6,
         )
-        return comfy_io.NodeOutput(await download_url_to_video_output(response.output.video_url))
+        return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))

 class WanApiExtension(ComfyExtension):
     @override
-    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             WanTextToImageApi,
             WanImageToImageApi,