[Partner Nodes] HappyHorse model (#13582)

* feat(api-nodes): add nodes for HappyHorse model

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* fix price badges

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* fix: allow durations up to 15 seconds

Signed-off-by: bigcat88 <bigcat88@icloud.com>

---------

Signed-off-by: bigcat88 <bigcat88@icloud.com>
This commit is contained in:
Alexander Piskun 2026-04-27 19:53:08 +03:00 committed by GitHub
parent 115f418b64
commit 6968a70e60
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 557 additions and 2 deletions

View File

@ -118,7 +118,7 @@ class Wan27ReferenceVideoInputField(BaseModel):
class Wan27ReferenceVideoParametersField(BaseModel):
    """Generation parameters for the Wan 2.7 reference-to-video request payload."""

    resolution: str = Field(...)
    ratio: str | None = Field(None)
    # Upper bound raised from 10 to 15 seconds to match the API's duration limit.
    duration: int = Field(5, ge=2, le=15)
    watermark: bool = Field(False)
    seed: int = Field(..., ge=0, le=2147483647)
@ -157,7 +157,7 @@ class Wan27VideoEditInputField(BaseModel):
class Wan27VideoEditParametersField(BaseModel):
    """Generation parameters for the Wan 2.7 video-edit request payload."""

    resolution: str = Field(...)
    ratio: str | None = Field(None)
    # Optional: the edit task derives duration from the input video, so callers may pass None.
    duration: int | None = Field(0)
    audio_setting: str = Field("auto")
    watermark: bool = Field(False)
    seed: int = Field(..., ge=0, le=2147483647)

View File

@ -1646,6 +1646,557 @@ class Wan2ReferenceVideoApi(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseTextToVideoApi(IO.ComfyNode):
    """API node: generate a video from a text prompt using the HappyHorse model.

    Submits a text-to-video task to the Wan proxy endpoint, polls the task until
    completion, and returns the resulting video.
    """

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="HappyHorseTextToVideoApi",
            display_name="HappyHorse Text to Video",
            category="api node/video/Wan",
            description="Generates a video based on a text prompt using the HappyHorse model.",
            inputs=[
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option(
                            "happyhorse-1.0-t2v",
                            [
                                IO.String.Input(
                                    "prompt",
                                    multiline=True,
                                    default="",
                                    tooltip="Prompt describing the elements and visual features. "
                                    "Supports English and Chinese.",
                                ),
                                IO.Combo.Input(
                                    "resolution",
                                    options=["720P", "1080P"],
                                ),
                                IO.Combo.Input(
                                    "ratio",
                                    options=["16:9", "9:16", "1:1", "4:3", "3:4"],
                                ),
                                IO.Int.Input(
                                    "duration",
                                    default=5,
                                    min=3,
                                    max=15,
                                    step=1,
                                    display_mode=IO.NumberDisplay.number,
                                ),
                            ],
                        ),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to use for generation.",
                ),
                IO.Boolean.Input(
                    "watermark",
                    default=False,
                    tooltip="Whether to add an AI-generated watermark to the result.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
                # Price = per-second rate (by resolution) * duration.
                expr="""
                (
                    $res := $lookup(widgets, "model.resolution");
                    $dur := $lookup(widgets, "model.duration");
                    $ppsTable := { "720p": 0.14, "1080p": 0.24 };
                    /* widget values are uppercase ("720P"); $lookup is case-sensitive, so normalize */
                    $pps := $lookup($ppsTable, $lowercase($res));
                    { "type": "usd", "usd": $pps * $dur }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: dict,
        seed: int,
        watermark: bool,
    ):
        # A non-empty prompt is required for text-to-video.
        validate_string(model["prompt"], strip_whitespace=False, min_length=1)
        initial_response = await sync_op(
            cls,
            ApiEndpoint(
                path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
                method="POST",
            ),
            response_model=TaskCreationResponse,
            data=Wan27Text2VideoTaskCreationRequest(
                model=model["model"],
                input=Text2VideoInputField(
                    prompt=model["prompt"],
                    negative_prompt=None,
                ),
                parameters=Wan27Text2VideoParametersField(
                    resolution=model["resolution"],
                    ratio=model["ratio"],
                    duration=model["duration"],
                    seed=seed,
                    watermark=watermark,
                ),
            ),
        )
        if not initial_response.output:
            raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
        # Poll the task until it reaches a terminal status, then download the video.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
            response_model=VideoTaskStatusResponse,
            status_extractor=lambda x: x.output.task_status,
            poll_interval=7,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseImageToVideoApi(IO.ComfyNode):
    """API node: generate a video from a first-frame image using the HappyHorse model.

    Uploads the first-frame image, submits an image-to-video task, polls until
    completion, and returns the resulting video. The output aspect ratio follows
    the uploaded image.
    """

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="HappyHorseImageToVideoApi",
            display_name="HappyHorse Image to Video",
            category="api node/video/Wan",
            description="Generate a video from a first-frame image using the HappyHorse model.",
            inputs=[
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option(
                            "happyhorse-1.0-i2v",
                            [
                                IO.String.Input(
                                    "prompt",
                                    multiline=True,
                                    default="",
                                    tooltip="Prompt describing the elements and visual features. "
                                    "Supports English and Chinese.",
                                ),
                                IO.Combo.Input(
                                    "resolution",
                                    options=["720P", "1080P"],
                                ),
                                IO.Int.Input(
                                    "duration",
                                    default=5,
                                    min=3,
                                    max=15,
                                    step=1,
                                    display_mode=IO.NumberDisplay.number,
                                ),
                            ],
                        ),
                    ],
                ),
                IO.Image.Input(
                    "first_frame",
                    tooltip="First frame image. The output aspect ratio is derived from this image.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to use for generation.",
                ),
                IO.Boolean.Input(
                    "watermark",
                    default=False,
                    tooltip="Whether to add an AI-generated watermark to the result.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
                # Price = per-second rate (by resolution) * duration.
                expr="""
                (
                    $res := $lookup(widgets, "model.resolution");
                    $dur := $lookup(widgets, "model.duration");
                    $ppsTable := { "720p": 0.14, "1080p": 0.24 };
                    /* widget values are uppercase ("720P"); $lookup is case-sensitive, so normalize */
                    $pps := $lookup($ppsTable, $lowercase($res));
                    { "type": "usd", "usd": $pps * $dur }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: dict,
        first_frame: Input.Image,
        seed: int,
        watermark: bool,
    ):
        # Upload the first frame and reference it in the media list.
        media = [
            Wan27MediaItem(
                type="first_frame",
                url=await upload_image_to_comfyapi(cls, image=first_frame),
            )
        ]
        initial_response = await sync_op(
            cls,
            ApiEndpoint(
                path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
                method="POST",
            ),
            response_model=TaskCreationResponse,
            data=Wan27ImageToVideoTaskCreationRequest(
                model=model["model"],
                input=Wan27ImageToVideoInputField(
                    # The prompt is optional for image-to-video; send None when empty.
                    prompt=model["prompt"] or None,
                    negative_prompt=None,
                    media=media,
                ),
                parameters=Wan27ImageToVideoParametersField(
                    resolution=model["resolution"],
                    duration=model["duration"],
                    seed=seed,
                    watermark=watermark,
                ),
            ),
        )
        if not initial_response.output:
            raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
        # Poll the task until it reaches a terminal status, then download the video.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
            response_model=VideoTaskStatusResponse,
            status_extractor=lambda x: x.output.task_status,
            poll_interval=7,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseVideoEditApi(IO.ComfyNode):
    """API node: edit a video with text instructions and optional reference images.

    Uploads the input video (and any reference images), submits a video-edit task,
    polls until completion, and returns the edited video. Output duration follows
    the input video; the backend truncates inputs longer than 15 seconds.
    """

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="HappyHorseVideoEditApi",
            display_name="HappyHorse Video Edit",
            category="api node/video/Wan",
            description="Edit a video using text instructions or reference images with the HappyHorse model. "
            "Output duration is 3-15s and matches the input video; inputs longer than 15s are truncated.",
            inputs=[
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option(
                            "happyhorse-1.0-video-edit",
                            [
                                IO.String.Input(
                                    "prompt",
                                    multiline=True,
                                    default="",
                                    tooltip="Editing instructions or style transfer requirements.",
                                ),
                                IO.Combo.Input(
                                    "resolution",
                                    options=["720P", "1080P"],
                                ),
                                IO.Combo.Input(
                                    "ratio",
                                    options=["16:9", "9:16", "1:1", "4:3", "3:4"],
                                    tooltip="Aspect ratio. If not changed, approximates the input video ratio.",
                                ),
                                IO.Autogrow.Input(
                                    "reference_images",
                                    template=IO.Autogrow.TemplateNames(
                                        IO.Image.Input("reference_image"),
                                        names=[
                                            "image1",
                                            "image2",
                                            "image3",
                                            "image4",
                                            "image5",
                                        ],
                                        min=0,
                                    ),
                                ),
                            ],
                        ),
                    ],
                ),
                IO.Video.Input(
                    "video",
                    tooltip="The video to edit.",
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to use for generation.",
                ),
                IO.Boolean.Input(
                    "watermark",
                    default=False,
                    tooltip="Whether to add an AI-generated watermark to the result.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution"]),
                # Duration is derived from the input video, so the badge shows a per-second rate.
                expr="""
                (
                    $res := $lookup(widgets, "model.resolution");
                    $ppsTable := { "720p": 0.14, "1080p": 0.24 };
                    /* widget values are uppercase ("720P"); $lookup is case-sensitive, so normalize */
                    $pps := $lookup($ppsTable, $lowercase($res));
                    { "type": "usd", "usd": $pps, "format": { "suffix": "/second" } }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: dict,
        video: Input.Video,
        seed: int,
        watermark: bool,
    ):
        # A non-empty prompt is required for video editing.
        validate_string(model["prompt"], strip_whitespace=False, min_length=1)
        # Accept up to 60s locally; the backend truncates anything past 15s.
        validate_video_duration(video, min_duration=3, max_duration=60)
        media = [Wan27MediaItem(type="video", url=await upload_video_to_comfyapi(cls, video))]
        reference_images = model.get("reference_images", {})
        for key in reference_images:
            media.append(
                Wan27MediaItem(
                    type="reference_image", url=await upload_image_to_comfyapi(cls, image=reference_images[key])
                )
            )
        initial_response = await sync_op(
            cls,
            ApiEndpoint(
                path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
                method="POST",
            ),
            response_model=TaskCreationResponse,
            data=Wan27VideoEditTaskCreationRequest(
                model=model["model"],
                input=Wan27VideoEditInputField(prompt=model["prompt"], media=media),
                parameters=Wan27VideoEditParametersField(
                    resolution=model["resolution"],
                    ratio=model["ratio"],
                    # duration=None lets the service match the input video length.
                    duration=None,
                    watermark=watermark,
                    seed=seed,
                ),
            ),
        )
        if not initial_response.output:
            raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
        # Poll the task until it reaches a terminal status, then download the video.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
            response_model=VideoTaskStatusResponse,
            status_extractor=lambda x: x.output.task_status,
            poll_interval=7,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseReferenceVideoApi(IO.ComfyNode):
    """API node: generate a video featuring people/objects from reference images.

    Uploads the reference images, submits a reference-to-video task, polls until
    completion, and returns the resulting video. Prompts can address the
    references as 'character1', 'character2', etc.
    """

    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="HappyHorseReferenceVideoApi",
            display_name="HappyHorse Reference to Video",
            category="api node/video/Wan",
            description="Generate a video featuring a person or object from reference materials with the HappyHorse "
            "model. Supports single-character performances and multi-character interactions.",
            inputs=[
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option(
                            "happyhorse-1.0-r2v",
                            [
                                IO.String.Input(
                                    "prompt",
                                    multiline=True,
                                    default="",
                                    tooltip="Prompt describing the video. Use identifiers such as 'character1' and "
                                    "'character2' to refer to the reference characters.",
                                ),
                                IO.Combo.Input(
                                    "resolution",
                                    options=["720P", "1080P"],
                                ),
                                IO.Combo.Input(
                                    "ratio",
                                    options=["16:9", "9:16", "1:1", "4:3", "3:4"],
                                ),
                                IO.Int.Input(
                                    "duration",
                                    default=5,
                                    min=3,
                                    max=15,
                                    step=1,
                                    display_mode=IO.NumberDisplay.number,
                                ),
                                IO.Autogrow.Input(
                                    "reference_images",
                                    template=IO.Autogrow.TemplateNames(
                                        IO.Image.Input("reference_image"),
                                        names=[
                                            "image1",
                                            "image2",
                                            "image3",
                                            "image4",
                                            "image5",
                                            "image6",
                                            "image7",
                                            "image8",
                                            "image9",
                                        ],
                                        min=1,
                                    ),
                                ),
                            ],
                        ),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to use for generation.",
                ),
                IO.Boolean.Input(
                    "watermark",
                    default=False,
                    tooltip="Whether to add an AI-generated watermark to the result.",
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Video.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
                # Price = per-second rate (by resolution) * duration.
                expr="""
                (
                    $res := $lookup(widgets, "model.resolution");
                    $dur := $lookup(widgets, "model.duration");
                    $ppsTable := { "720p": 0.14, "1080p": 0.24 };
                    /* widget values are uppercase ("720P"); $lookup is case-sensitive, so normalize */
                    $pps := $lookup($ppsTable, $lowercase($res));
                    { "type": "usd", "usd": $pps * $dur }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: dict,
        seed: int,
        watermark: bool,
    ):
        # A non-empty prompt is required for reference-to-video.
        validate_string(model["prompt"], strip_whitespace=False, min_length=1)
        media = []
        reference_images = model.get("reference_images", {})
        for key in reference_images:
            media.append(
                Wan27MediaItem(
                    type="reference_image",
                    url=await upload_image_to_comfyapi(cls, image=reference_images[key]),
                )
            )
        # Defensive check: the Autogrow input enforces min=1, but guard anyway.
        if not media:
            raise ValueError("At least one reference image must be provided.")
        initial_response = await sync_op(
            cls,
            ApiEndpoint(
                path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
                method="POST",
            ),
            response_model=TaskCreationResponse,
            data=Wan27ReferenceVideoTaskCreationRequest(
                model=model["model"],
                input=Wan27ReferenceVideoInputField(
                    prompt=model["prompt"],
                    negative_prompt=None,
                    media=media,
                ),
                parameters=Wan27ReferenceVideoParametersField(
                    resolution=model["resolution"],
                    ratio=model["ratio"],
                    duration=model["duration"],
                    watermark=watermark,
                    seed=seed,
                ),
            ),
        )
        if not initial_response.output:
            raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
        # Poll the task until it reaches a terminal status, then download the video.
        response = await poll_op(
            cls,
            ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
            response_model=VideoTaskStatusResponse,
            status_extractor=lambda x: x.output.task_status,
            poll_interval=7,
        )
        return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class WanApiExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@ -1660,6 +2211,10 @@ class WanApiExtension(ComfyExtension):
Wan2VideoContinuationApi,
Wan2VideoEditApi,
Wan2ReferenceVideoApi,
HappyHorseTextToVideoApi,
HappyHorseImageToVideoApi,
HappyHorseVideoEditApi,
HappyHorseReferenceVideoApi,
]