diff --git a/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json b/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json
index 67688e82c..df5b5d7fe 100644
--- a/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json
+++ b/comfy/text_encoders/qwen25_tokenizer/tokenizer_config.json
@@ -179,36 +179,36 @@
"special": false
},
"151665": {
- "content": "<|img|>",
+ "content": "",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
- "special": true
+ "special": false
},
"151666": {
- "content": "<|endofimg|>",
+ "content": "",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
- "special": true
+ "special": false
},
"151667": {
- "content": "<|meta|>",
+ "content": "",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
- "special": true
+ "special": false
},
"151668": {
- "content": "<|endofmeta|>",
+ "content": "",
"lstrip": false,
"normalized": false,
"rstrip": false,
"single_word": false,
- "special": true
+ "special": false
}
},
"additional_special_tokens": [
diff --git a/comfy_api/latest/_input_impl/video_types.py b/comfy_api/latest/_input_impl/video_types.py
index bde37f90a..7231bf13c 100644
--- a/comfy_api/latest/_input_impl/video_types.py
+++ b/comfy_api/latest/_input_impl/video_types.py
@@ -336,7 +336,10 @@ class VideoFromComponents(VideoInput):
raise ValueError("Only MP4 format is supported for now")
if codec != VideoCodec.AUTO and codec != VideoCodec.H264:
raise ValueError("Only H264 codec is supported for now")
- with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}) as output:
+ extra_kwargs = {}
+ if format != VideoContainer.AUTO:
+ extra_kwargs["format"] = format.value
+ with av.open(path, mode='w', options={'movflags': 'use_metadata_tags'}, **extra_kwargs) as output:
# Add metadata before writing any streams
if metadata is not None:
for key, value in metadata.items():
diff --git a/comfy_api_nodes/apis/kling_api.py b/comfy_api_nodes/apis/kling_api.py
new file mode 100644
index 000000000..0a3b447c5
--- /dev/null
+++ b/comfy_api_nodes/apis/kling_api.py
@@ -0,0 +1,66 @@
+from pydantic import BaseModel, Field
+
+
+class OmniProText2VideoRequest(BaseModel):
+    model_name: str = Field(..., description="Model name; currently only 'kling-video-o1' is supported")
+ aspect_ratio: str = Field(..., description="'16:9', '9:16' or '1:1'")
+ duration: str = Field(..., description="'5' or '10'")
+ prompt: str = Field(...)
+ mode: str = Field("pro")
+
+
+class OmniParamImage(BaseModel):
+ image_url: str = Field(...)
+ type: str | None = Field(None, description="Can be 'first_frame' or 'end_frame'")
+
+
+class OmniParamVideo(BaseModel):
+ video_url: str = Field(...)
+ refer_type: str | None = Field(..., description="Can be 'base' or 'feature'")
+ keep_original_sound: str = Field(..., description="'yes' or 'no'")
+
+
+class OmniProFirstLastFrameRequest(BaseModel):
+    model_name: str = Field(..., description="Model name; currently only 'kling-video-o1' is supported")
+ image_list: list[OmniParamImage] = Field(..., min_length=1, max_length=7)
+ duration: str = Field(..., description="'5' or '10'")
+ prompt: str = Field(...)
+ mode: str = Field("pro")
+
+
+class OmniProReferences2VideoRequest(BaseModel):
+    model_name: str = Field(..., description="Model name; currently only 'kling-video-o1' is supported")
+ aspect_ratio: str | None = Field(..., description="'16:9', '9:16' or '1:1'")
+ image_list: list[OmniParamImage] | None = Field(
+        None, max_length=7, description="Up to 7 images; at most 4 when a video is also provided."
+ )
+ video_list: list[OmniParamVideo] | None = Field(None, max_length=1)
+    duration: str | None = Field(..., description="Video duration in seconds, from '3' to '10'.")
+ prompt: str = Field(...)
+ mode: str = Field("pro")
+
+
+class TaskStatusVideoResult(BaseModel):
+ duration: str | None = Field(None, description="Total video duration")
+ id: str | None = Field(None, description="Generated video ID")
+ url: str | None = Field(None, description="URL for generated video")
+
+
+class TaskStatusVideoResults(BaseModel):
+ videos: list[TaskStatusVideoResult] | None = Field(None)
+
+
+class TaskStatusVideoResponseData(BaseModel):
+ created_at: int | None = Field(None, description="Task creation time")
+ updated_at: int | None = Field(None, description="Task update time")
+ task_status: str | None = None
+ task_status_msg: str | None = Field(None, description="Additional failure reason. Only for polling endpoint.")
+ task_id: str | None = Field(None, description="Task ID")
+ task_result: TaskStatusVideoResults | None = Field(None)
+
+
+class TaskStatusVideoResponse(BaseModel):
+ code: int | None = Field(None, description="Error code")
+ message: str | None = Field(None, description="Error message")
+ request_id: str | None = Field(None, description="Request ID")
+ data: TaskStatusVideoResponseData | None = Field(None)
diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py
index 23a7f55f1..850c44db6 100644
--- a/comfy_api_nodes/nodes_kling.py
+++ b/comfy_api_nodes/nodes_kling.py
@@ -4,13 +4,13 @@ For source of truth on the allowed permutations of request fields, please refere
- [Compatibility Table](https://app.klingai.com/global/dev/document-api/apiReference/model/skillsMap)
"""
-import math
import logging
-
-from typing_extensions import override
+import math
import torch
+from typing_extensions import override
+from comfy_api.latest import IO, ComfyExtension, Input, InputImpl
from comfy_api_nodes.apis import (
KlingCameraControl,
KlingCameraConfig,
@@ -48,23 +48,31 @@ from comfy_api_nodes.apis import (
KlingCharacterEffectModelName,
KlingSingleImageEffectModelName,
)
+from comfy_api_nodes.apis.kling_api import (
+ OmniParamImage,
+ OmniParamVideo,
+ OmniProFirstLastFrameRequest,
+ OmniProReferences2VideoRequest,
+ OmniProText2VideoRequest,
+ TaskStatusVideoResponse,
+)
from comfy_api_nodes.util import (
- validate_image_dimensions,
+ ApiEndpoint,
+ download_url_to_image_tensor,
+ download_url_to_video_output,
+ get_number_of_images,
+ poll_op,
+ sync_op,
+ tensor_to_base64_string,
+ upload_audio_to_comfyapi,
+ upload_images_to_comfyapi,
+ upload_video_to_comfyapi,
validate_image_aspect_ratio,
+ validate_image_dimensions,
+ validate_string,
validate_video_dimensions,
validate_video_duration,
- tensor_to_base64_string,
- validate_string,
- upload_audio_to_comfyapi,
- download_url_to_image_tensor,
- upload_video_to_comfyapi,
- download_url_to_video_output,
- sync_op,
- ApiEndpoint,
- poll_op,
)
-from comfy_api.input_impl import VideoFromFile
-from comfy_api.latest import ComfyExtension, IO, Input
KLING_API_VERSION = "v1"
PATH_TEXT_TO_VIDEO = f"/proxy/kling/{KLING_API_VERSION}/videos/text2video"
@@ -202,6 +210,20 @@ VOICES_CONFIG = {
}
+async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusVideoResponse) -> IO.NodeOutput:
+ if response.code:
+ raise RuntimeError(
+ f"Kling request failed. Code: {response.code}, Message: {response.message}, Data: {response.data}"
+ )
+ final_response = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/kling/v1/videos/omni-video/{response.data.task_id}"),
+ response_model=TaskStatusVideoResponse,
+ status_extractor=lambda r: (r.data.task_status if r.data else None),
+ )
+ return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url))
+
+
def is_valid_camera_control_configs(configs: list[float]) -> bool:
"""Verifies that at least one camera control configuration is non-zero."""
return any(not math.isclose(value, 0.0) for value in configs)
@@ -449,7 +471,7 @@ async def execute_video_effect(
image_1: torch.Tensor,
image_2: torch.Tensor | None = None,
model_mode: KlingVideoGenMode | None = None,
-) -> tuple[VideoFromFile, str, str]:
+) -> tuple[InputImpl.VideoFromFile, str, str]:
if dual_character:
request_input_field = KlingDualCharacterEffectInput(
model_name=model_name,
@@ -736,6 +758,386 @@ class KlingTextToVideoNode(IO.ComfyNode):
)
+class OmniProTextToVideoNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls) -> IO.Schema:
+ return IO.Schema(
+ node_id="KlingOmniProTextToVideoNode",
+ display_name="Kling Omni Text to Video (Pro)",
+ category="api node/video/Kling",
+ description="Use text prompts to generate videos with the latest Kling model.",
+ inputs=[
+ IO.Combo.Input("model_name", options=["kling-video-o1"]),
+ IO.String.Input(
+ "prompt",
+ multiline=True,
+ tooltip="A text prompt describing the video content. "
+ "This can include both positive and negative descriptions.",
+ ),
+ IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]),
+ IO.Combo.Input("duration", options=[5, 10]),
+ ],
+ outputs=[
+ IO.Video.Output(),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model_name: str,
+ prompt: str,
+ aspect_ratio: str,
+ duration: int,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, min_length=1, max_length=2500)
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
+ response_model=TaskStatusVideoResponse,
+ data=OmniProText2VideoRequest(
+ model_name=model_name,
+ prompt=prompt,
+ aspect_ratio=aspect_ratio,
+ duration=str(duration),
+ ),
+ )
+ return await finish_omni_video_task(cls, response)
+
+
+class OmniProFirstLastFrameNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls) -> IO.Schema:
+ return IO.Schema(
+ node_id="KlingOmniProFirstLastFrameNode",
+ display_name="Kling Omni First-Last-Frame to Video (Pro)",
+ category="api node/video/Kling",
+ description="Use a start frame, an optional end frame, or reference images with the latest Kling model.",
+ inputs=[
+ IO.Combo.Input("model_name", options=["kling-video-o1"]),
+ IO.String.Input(
+ "prompt",
+ multiline=True,
+ tooltip="A text prompt describing the video content. "
+ "This can include both positive and negative descriptions.",
+ ),
+ IO.Combo.Input("duration", options=["5", "10"]),
+ IO.Image.Input("first_frame"),
+ IO.Image.Input(
+ "end_frame",
+ optional=True,
+ tooltip="An optional end frame for the video. "
+ "This cannot be used simultaneously with 'reference_images'.",
+ ),
+ IO.Image.Input(
+ "reference_images",
+ optional=True,
+ tooltip="Up to 6 additional reference images.",
+ ),
+ ],
+ outputs=[
+ IO.Video.Output(),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model_name: str,
+ prompt: str,
+ duration: int,
+ first_frame: Input.Image,
+ end_frame: Input.Image | None = None,
+ reference_images: Input.Image | None = None,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, min_length=1, max_length=2500)
+ if end_frame is not None and reference_images is not None:
+ raise ValueError("The 'end_frame' input cannot be used simultaneously with 'reference_images'.")
+ validate_image_dimensions(first_frame, min_width=300, min_height=300)
+ validate_image_aspect_ratio(first_frame, (1, 2.5), (2.5, 1))
+ image_list: list[OmniParamImage] = [
+ OmniParamImage(
+ image_url=(await upload_images_to_comfyapi(cls, first_frame, wait_label="Uploading first frame"))[0],
+ type="first_frame",
+ )
+ ]
+ if end_frame is not None:
+ validate_image_dimensions(end_frame, min_width=300, min_height=300)
+ validate_image_aspect_ratio(end_frame, (1, 2.5), (2.5, 1))
+ image_list.append(
+ OmniParamImage(
+ image_url=(await upload_images_to_comfyapi(cls, end_frame, wait_label="Uploading end frame"))[0],
+ type="end_frame",
+ )
+ )
+ if reference_images is not None:
+ if get_number_of_images(reference_images) > 6:
+ raise ValueError("The maximum number of reference images allowed is 6.")
+ for i in reference_images:
+ validate_image_dimensions(i, min_width=300, min_height=300)
+ validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1))
+ for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference frame(s)"):
+ image_list.append(OmniParamImage(image_url=i))
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
+ response_model=TaskStatusVideoResponse,
+ data=OmniProFirstLastFrameRequest(
+ model_name=model_name,
+ prompt=prompt,
+ duration=str(duration),
+ image_list=image_list,
+ ),
+ )
+ return await finish_omni_video_task(cls, response)
+
+
+class OmniProImageToVideoNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls) -> IO.Schema:
+ return IO.Schema(
+ node_id="KlingOmniProImageToVideoNode",
+ display_name="Kling Omni Image to Video (Pro)",
+ category="api node/video/Kling",
+ description="Use up to 7 reference images to generate a video with the latest Kling model.",
+ inputs=[
+ IO.Combo.Input("model_name", options=["kling-video-o1"]),
+ IO.String.Input(
+ "prompt",
+ multiline=True,
+ tooltip="A text prompt describing the video content. "
+ "This can include both positive and negative descriptions.",
+ ),
+ IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]),
+ IO.Int.Input("duration", default=3, min=3, max=10, display_mode=IO.NumberDisplay.slider),
+ IO.Image.Input(
+ "reference_images",
+ tooltip="Up to 7 reference images.",
+ ),
+ ],
+ outputs=[
+ IO.Video.Output(),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model_name: str,
+ prompt: str,
+ aspect_ratio: str,
+ duration: int,
+ reference_images: Input.Image,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, min_length=1, max_length=2500)
+ if get_number_of_images(reference_images) > 7:
+ raise ValueError("The maximum number of reference images is 7.")
+ for i in reference_images:
+ validate_image_dimensions(i, min_width=300, min_height=300)
+ validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1))
+ image_list: list[OmniParamImage] = []
+ for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"):
+ image_list.append(OmniParamImage(image_url=i))
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
+ response_model=TaskStatusVideoResponse,
+ data=OmniProReferences2VideoRequest(
+ model_name=model_name,
+ prompt=prompt,
+ aspect_ratio=aspect_ratio,
+ duration=str(duration),
+ image_list=image_list,
+ ),
+ )
+ return await finish_omni_video_task(cls, response)
+
+
+class OmniProVideoToVideoNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls) -> IO.Schema:
+ return IO.Schema(
+ node_id="KlingOmniProVideoToVideoNode",
+ display_name="Kling Omni Video to Video (Pro)",
+ category="api node/video/Kling",
+ description="Use a video and up to 4 reference images to generate a video with the latest Kling model.",
+ inputs=[
+ IO.Combo.Input("model_name", options=["kling-video-o1"]),
+ IO.String.Input(
+ "prompt",
+ multiline=True,
+ tooltip="A text prompt describing the video content. "
+ "This can include both positive and negative descriptions.",
+ ),
+ IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]),
+ IO.Int.Input("duration", default=3, min=3, max=10, display_mode=IO.NumberDisplay.slider),
+ IO.Video.Input("reference_video", tooltip="Video to use as a reference."),
+ IO.Boolean.Input("keep_original_sound", default=True),
+ IO.Image.Input(
+ "reference_images",
+ tooltip="Up to 4 additional reference images.",
+ optional=True,
+ ),
+ ],
+ outputs=[
+ IO.Video.Output(),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model_name: str,
+ prompt: str,
+ aspect_ratio: str,
+ duration: int,
+ reference_video: Input.Video,
+ keep_original_sound: bool,
+ reference_images: Input.Image | None = None,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, min_length=1, max_length=2500)
+ validate_video_duration(reference_video, min_duration=3.0, max_duration=10.05)
+ validate_video_dimensions(reference_video, min_width=720, min_height=720, max_width=2160, max_height=2160)
+ image_list: list[OmniParamImage] = []
+ if reference_images is not None:
+ if get_number_of_images(reference_images) > 4:
+ raise ValueError("The maximum number of reference images allowed with a video input is 4.")
+ for i in reference_images:
+ validate_image_dimensions(i, min_width=300, min_height=300)
+ validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1))
+ for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"):
+ image_list.append(OmniParamImage(image_url=i))
+ video_list = [
+ OmniParamVideo(
+ video_url=await upload_video_to_comfyapi(cls, reference_video, wait_label="Uploading reference video"),
+ refer_type="feature",
+ keep_original_sound="yes" if keep_original_sound else "no",
+ )
+ ]
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
+ response_model=TaskStatusVideoResponse,
+ data=OmniProReferences2VideoRequest(
+ model_name=model_name,
+ prompt=prompt,
+ aspect_ratio=aspect_ratio,
+ duration=str(duration),
+ image_list=image_list if image_list else None,
+ video_list=video_list,
+ ),
+ )
+ return await finish_omni_video_task(cls, response)
+
+
+class OmniProEditVideoNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls) -> IO.Schema:
+ return IO.Schema(
+ node_id="KlingOmniProEditVideoNode",
+ display_name="Kling Omni Edit Video (Pro)",
+ category="api node/video/Kling",
+ description="Edit an existing video with the latest model from Kling.",
+ inputs=[
+ IO.Combo.Input("model_name", options=["kling-video-o1"]),
+ IO.String.Input(
+ "prompt",
+ multiline=True,
+ tooltip="A text prompt describing the video content. "
+ "This can include both positive and negative descriptions.",
+ ),
+ IO.Video.Input("video", tooltip="Video for editing. The output video length will be the same."),
+ IO.Boolean.Input("keep_original_sound", default=True),
+ IO.Image.Input(
+ "reference_images",
+ tooltip="Up to 4 additional reference images.",
+ optional=True,
+ ),
+ ],
+ outputs=[
+ IO.Video.Output(),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model_name: str,
+ prompt: str,
+ video: Input.Video,
+ keep_original_sound: bool,
+ reference_images: Input.Image | None = None,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, min_length=1, max_length=2500)
+ validate_video_duration(video, min_duration=3.0, max_duration=10.05)
+ validate_video_dimensions(video, min_width=720, min_height=720, max_width=2160, max_height=2160)
+ image_list: list[OmniParamImage] = []
+ if reference_images is not None:
+ if get_number_of_images(reference_images) > 4:
+ raise ValueError("The maximum number of reference images allowed with a video input is 4.")
+ for i in reference_images:
+ validate_image_dimensions(i, min_width=300, min_height=300)
+ validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1))
+ for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"):
+ image_list.append(OmniParamImage(image_url=i))
+ video_list = [
+ OmniParamVideo(
+ video_url=await upload_video_to_comfyapi(cls, video, wait_label="Uploading base video"),
+ refer_type="base",
+ keep_original_sound="yes" if keep_original_sound else "no",
+ )
+ ]
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
+ response_model=TaskStatusVideoResponse,
+ data=OmniProReferences2VideoRequest(
+ model_name=model_name,
+ prompt=prompt,
+ aspect_ratio=None,
+ duration=None,
+ image_list=image_list if image_list else None,
+ video_list=video_list,
+ ),
+ )
+ return await finish_omni_video_task(cls, response)
+
+
class KlingCameraControlT2VNode(IO.ComfyNode):
"""
Kling Text to Video Camera Control Node. This node is a text to video node, but it supports controlling the camera.
@@ -1162,7 +1564,10 @@ class KlingSingleImageVideoEffectNode(IO.ComfyNode):
category="api node/video/Kling",
description="Achieve different special effects when generating a video based on the effect_scene.",
inputs=[
- IO.Image.Input("image", tooltip=" Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1"),
+ IO.Image.Input(
+ "image",
+                    tooltip="Reference Image. URL or Base64 encoded string (without data:image prefix). File size cannot exceed 10MB, resolution not less than 300*300px, aspect ratio between 1:2.5 ~ 2.5:1",
+ ),
IO.Combo.Input(
"effect_scene",
options=[i.value for i in KlingSingleImageEffectsScene],
@@ -1525,6 +1930,11 @@ class KlingExtension(ComfyExtension):
KlingImageGenerationNode,
KlingSingleImageVideoEffectNode,
KlingDualCharacterVideoEffectNode,
+ OmniProTextToVideoNode,
+ OmniProFirstLastFrameNode,
+ OmniProImageToVideoNode,
+ OmniProVideoToVideoNode,
+ OmniProEditVideoNode,
]
diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py
index b9019841f..0532bea9a 100644
--- a/comfy_api_nodes/util/upload_helpers.py
+++ b/comfy_api_nodes/util/upload_helpers.py
@@ -103,6 +103,7 @@ async def upload_video_to_comfyapi(
container: VideoContainer = VideoContainer.MP4,
codec: VideoCodec = VideoCodec.H264,
max_duration: Optional[int] = None,
+ wait_label: str | None = "Uploading",
) -> str:
"""
Uploads a single video to ComfyUI API and returns its download URL.
@@ -127,7 +128,7 @@ async def upload_video_to_comfyapi(
video.save_to(video_bytes_io, format=container, codec=codec)
video_bytes_io.seek(0)
- return await upload_file_to_comfyapi(cls, video_bytes_io, filename, upload_mime_type)
+ return await upload_file_to_comfyapi(cls, video_bytes_io, filename, upload_mime_type, wait_label)
async def upload_file_to_comfyapi(
diff --git a/requirements.txt b/requirements.txt
index 386477808..045b2ac54 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.32.9
+comfyui-frontend-package==1.32.10
comfyui-workflow-templates==0.7.25
comfyui-embedded-docs==0.3.1
torch