[Partner Nodes] add SD2 real human support (#13509)

* feat(api-nodes): add SD2 real human support

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* fix: add validation before uploading Assets

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* Add asset_id and group_id displaying on the node

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* extend poll_op so it can be used instead of a custom async polling loop

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* added the polling for the "Active" status after asset creation

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* updated tooltip for group_id

* allow usage of real-human assets in the ByteDance2FirstLastFrame node

* add reference count limits

* corrected price in status when input assets contain video

Signed-off-by: bigcat88 <bigcat88@icloud.com>

---------

Signed-off-by: bigcat88 <bigcat88@icloud.com>
This commit is contained in:
Alexander Piskun 2026-04-23 03:59:55 +03:00 committed by GitHub
parent 0be87b082a
commit e988df72f8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 494 additions and 18 deletions

View File

@ -122,6 +122,41 @@ class TaskStatusResponse(BaseModel):
usage: TaskStatusUsage | None = Field(None)
class GetAssetResponse(BaseModel):
    """A Seedance personal asset as returned by GET /proxy/seedance/assets/{id}."""

    id: str = Field(...)
    name: str | None = Field(None)
    url: str | None = Field(None)
    asset_type: str = Field(...)  # "Image", "Video" or "Audio" (see callers' grouping logic)
    group_id: str = Field(...)
    status: str = Field(...)  # "Active" once pre-processing finished; "Failed" on error
    error: TaskStatusError | None = Field(None)
class SeedanceCreateVisualValidateSessionResponse(BaseModel):
    """Response for creating a real-person (H5) visual validation session."""

    session_id: str = Field(...)
    h5_link: str = Field(...)  # browser link the user opens to complete face verification
class SeedanceGetVisualValidateSessionResponse(BaseModel):
    """Polling response for a visual validation session."""

    session_id: str = Field(...)
    status: str = Field(...)  # polled for "completed" / "failed"
    group_id: str | None = Field(None)  # set once the session completes successfully
    error_code: str | None = Field(None)
    error_message: str | None = Field(None)
class SeedanceCreateAssetRequest(BaseModel):
    """Request body for registering a new personal asset in an asset group."""

    group_id: str = Field(...)
    url: str = Field(...)  # URL of the already-uploaded media
    asset_type: str = Field(...)  # "Image" or "Video" in current callers
    name: str | None = Field(None, max_length=64)
    project_name: str | None = Field(None)
class SeedanceCreateAssetResponse(BaseModel):
    """Response for asset creation; carries the new asset's identifier."""

    asset_id: str = Field(...)
# Dollars per 1K tokens, keyed by (model_id, has_video_input).
SEEDANCE2_PRICE_PER_1K_TOKENS = {
("dreamina-seedance-2-0-260128", False): 0.007,

View File

@ -1,5 +1,6 @@
import logging
import math
import re
import torch
from typing_extensions import override
@ -11,9 +12,14 @@ from comfy_api_nodes.apis.bytedance import (
SEEDANCE2_PRICE_PER_1K_TOKENS,
SEEDANCE2_REF_VIDEO_PIXEL_LIMITS,
VIDEO_TASKS_EXECUTION_TIME,
GetAssetResponse,
Image2VideoTaskCreationRequest,
ImageTaskCreationResponse,
Seedance2TaskCreationRequest,
SeedanceCreateAssetRequest,
SeedanceCreateAssetResponse,
SeedanceCreateVisualValidateSessionResponse,
SeedanceGetVisualValidateSessionResponse,
Seedream4Options,
Seedream4TaskCreationRequest,
TaskAudioContent,
@ -44,10 +50,16 @@ from comfy_api_nodes.util import (
validate_image_aspect_ratio,
validate_image_dimensions,
validate_string,
validate_video_dimensions,
validate_video_duration,
)
from server import PromptServer
BYTEPLUS_IMAGE_ENDPOINT = "/proxy/byteplus/api/v3/images/generations"
_VERIFICATION_POLL_TIMEOUT_SEC = 120
_VERIFICATION_POLL_INTERVAL_SEC = 3
SEEDREAM_MODELS = {
"seedream 5.0 lite": "seedream-5-0-260128",
"seedream-4-5-251128": "seedream-4-5-251128",
@ -96,6 +108,169 @@ def _validate_ref_video_pixels(video: Input.Video, model_id: str, resolution: st
)
async def _resolve_reference_assets(
    cls: type[IO.ComfyNode],
    asset_ids: list[str],
) -> tuple[dict[str, str], dict[str, str], dict[str, str]]:
    """Fetch each asset, require Active status, and bucket the ids by asset_type.

    Returns (image_assets, video_assets, audio_assets); each maps
    asset_id -> "asset://<asset_id>". Blank ids and unrecognized asset types
    are silently skipped, matching the downstream content-building logic.
    """
    buckets: dict[str, dict[str, str]] = {"Image": {}, "Video": {}, "Audio": {}}
    for position, raw_id in enumerate(asset_ids, 1):
        asset_id = (raw_id or "").strip()
        if not asset_id:
            continue
        asset = await sync_op(
            cls,
            ApiEndpoint(path=f"/proxy/seedance/assets/{asset_id}"),
            response_model=GetAssetResponse,
        )
        if asset.status != "Active":
            detail = f" {asset.error.code}: {asset.error.message}" if asset.error else ""
            raise ValueError(
                f"Reference asset {position} (Id={asset_id}) is not Active (Status={asset.status}).{detail}"
            )
        bucket = buckets.get(asset.asset_type)
        if bucket is not None:
            bucket[asset_id] = f"asset://{asset_id}"
    return buckets["Image"], buckets["Video"], buckets["Audio"]
_ASSET_REF_RE = re.compile(r"\basset ?(\d{1,2})\b", re.IGNORECASE)
def _build_asset_labels(
reference_assets: dict[str, str],
image_asset_uris: dict[str, str],
video_asset_uris: dict[str, str],
audio_asset_uris: dict[str, str],
n_reference_images: int,
n_reference_videos: int,
n_reference_audios: int,
) -> dict[int, str]:
"""Map asset slot number (from 'asset_N' keys) to its positional label.
Asset entries are appended to `content` after the reference_images/videos/audios,
so their 1-indexed labels continue from the count of existing same-type refs:
one reference_images entry + one Image-type asset -> asset labelled "Image 2".
"""
image_n = n_reference_images
video_n = n_reference_videos
audio_n = n_reference_audios
labels: dict[int, str] = {}
for slot_key, raw_id in reference_assets.items():
asset_id = (raw_id or "").strip()
if not asset_id:
continue
try:
slot_num = int(slot_key.rsplit("_", 1)[-1])
except ValueError:
continue
if asset_id in image_asset_uris:
image_n += 1
labels[slot_num] = f"Image {image_n}"
elif asset_id in video_asset_uris:
video_n += 1
labels[slot_num] = f"Video {video_n}"
elif asset_id in audio_asset_uris:
audio_n += 1
labels[slot_num] = f"Audio {audio_n}"
return labels
def _rewrite_asset_refs(prompt: str, labels: dict[int, str]) -> str:
"""Case-insensitively replace 'assetNN' (1-2 digit) tokens with their labels."""
if not labels:
return prompt
def _sub(m: "re.Match[str]") -> str:
return labels.get(int(m.group(1)), m.group(0))
return _ASSET_REF_RE.sub(_sub, prompt)
async def _obtain_group_id_via_h5_auth(cls: type[IO.ComfyNode]) -> str:
    """Run the real-person H5 verification flow and return the new group_id.

    Creates a visual-validate session, surfaces the browser link to the user
    (both in the log and in the node's progress text), then polls the session
    until it reaches "completed" (or fails / the poll budget runs out).
    """
    session = await sync_op(
        cls,
        ApiEndpoint(path="/proxy/seedance/visual-validate/sessions", method="POST"),
        response_model=SeedanceCreateVisualValidateSessionResponse,
    )
    # warning level so the link is visible under default logging configuration
    logger.warning("Seedance authentication required. Open link: %s", session.h5_link)
    h5_text = f"Open this link in your browser and complete face verification:\n\n{session.h5_link}"
    result = await poll_op(
        cls,
        ApiEndpoint(path=f"/proxy/seedance/visual-validate/sessions/{session.session_id}"),
        response_model=SeedanceGetVisualValidateSessionResponse,
        status_extractor=lambda r: r.status,
        completed_statuses=["completed"],
        failed_statuses=["failed"],
        poll_interval=_VERIFICATION_POLL_INTERVAL_SEC,
        # attempt count and estimated duration are derived from the overall timeout budget
        max_poll_attempts=(_VERIFICATION_POLL_TIMEOUT_SEC // _VERIFICATION_POLL_INTERVAL_SEC) - 1,
        estimated_duration=_VERIFICATION_POLL_TIMEOUT_SEC - 1,
        extra_text=h5_text,
    )
    if not result.group_id:
        # a "completed" session without a group_id violates the API contract
        raise RuntimeError(f"Seedance session {session.session_id} completed without a group_id")
    logger.warning("Seedance authentication complete. New GroupId: %s", result.group_id)
    PromptServer.instance.send_progress_text(
        f"Authentication complete. New GroupId: {result.group_id}", cls.hidden.unique_id
    )
    return result.group_id
async def _resolve_group_id(cls: type[IO.ComfyNode], group_id: str) -> str:
    """Return the trimmed caller-supplied group_id, or create one via H5 auth."""
    cleaned = group_id.strip() if group_id else ""
    if cleaned:
        return cleaned
    return await _obtain_group_id_via_h5_auth(cls)
async def _create_seedance_asset(
    cls: type[IO.ComfyNode],
    *,
    group_id: str,
    url: str,
    name: str,
    asset_type: str,
) -> str:
    """Register an already-uploaded file as a Seedance personal asset.

    Returns the new asset's id. An empty `name` is sent as None so the
    server applies its own default.
    """
    response = await sync_op(
        cls,
        ApiEndpoint(path="/proxy/seedance/assets", method="POST"),
        response_model=SeedanceCreateAssetResponse,
        data=SeedanceCreateAssetRequest(
            group_id=group_id,
            url=url,
            asset_type=asset_type,
            name=name or None,
        ),
    )
    return response.asset_id
async def _wait_for_asset_active(cls: type[IO.ComfyNode], asset_id: str, group_id: str) -> GetAssetResponse:
    """Poll the newly created asset until its status becomes Active.

    Polls every 5s for up to 1200 attempts; a "Failed" status terminates the
    poll (failure handling is delegated to poll_op).
    """
    return await poll_op(
        cls,
        ApiEndpoint(path=f"/proxy/seedance/assets/{asset_id}"),
        response_model=GetAssetResponse,
        status_extractor=lambda r: r.status,
        completed_statuses=["Active"],
        failed_statuses=["Failed"],
        poll_interval=5,
        max_poll_attempts=1200,
        # ids are shown in the progress text so the user can copy them while waiting
        extra_text=f"Waiting for asset pre-processing...\n\nasset_id: {asset_id}\n\ngroup_id: {group_id}",
    )
def _seedance2_price_extractor(model_id: str, has_video_input: bool):
"""Returns a price_extractor closure for Seedance 2.0 poll_op."""
rate = SEEDANCE2_PRICE_PER_1K_TOKENS.get((model_id, has_video_input))
@ -1228,12 +1403,27 @@ class ByteDance2FirstLastFrameNode(IO.ComfyNode):
IO.Image.Input(
"first_frame",
tooltip="First frame image for the video.",
optional=True,
),
IO.Image.Input(
"last_frame",
tooltip="Last frame image for the video.",
optional=True,
),
IO.String.Input(
"first_frame_asset_id",
default="",
tooltip="Seedance asset_id to use as the first frame. "
"Mutually exclusive with the first_frame image input.",
optional=True,
),
IO.String.Input(
"last_frame_asset_id",
default="",
tooltip="Seedance asset_id to use as the last frame. "
"Mutually exclusive with the last_frame image input.",
optional=True,
),
IO.Int.Input(
"seed",
default=0,
@ -1286,24 +1476,54 @@ class ByteDance2FirstLastFrameNode(IO.ComfyNode):
async def execute(
cls,
model: dict,
first_frame: Input.Image,
seed: int,
watermark: bool,
first_frame: Input.Image | None = None,
last_frame: Input.Image | None = None,
first_frame_asset_id: str = "",
last_frame_asset_id: str = "",
) -> IO.NodeOutput:
validate_string(model["prompt"], strip_whitespace=True, min_length=1)
model_id = SEEDANCE_MODELS[model["model"]]
first_frame_asset_id = first_frame_asset_id.strip()
last_frame_asset_id = last_frame_asset_id.strip()
if first_frame is not None and first_frame_asset_id:
raise ValueError("Provide only one of first_frame or first_frame_asset_id, not both.")
if first_frame is None and not first_frame_asset_id:
raise ValueError("Either first_frame or first_frame_asset_id is required.")
if last_frame is not None and last_frame_asset_id:
raise ValueError("Provide only one of last_frame or last_frame_asset_id, not both.")
asset_ids_to_resolve = [a for a in (first_frame_asset_id, last_frame_asset_id) if a]
image_assets: dict[str, str] = {}
if asset_ids_to_resolve:
image_assets, _, _ = await _resolve_reference_assets(cls, asset_ids_to_resolve)
for aid in asset_ids_to_resolve:
if aid not in image_assets:
raise ValueError(f"Asset {aid} is not an Image asset.")
if first_frame_asset_id:
first_frame_url = image_assets[first_frame_asset_id]
else:
first_frame_url = await upload_image_to_comfyapi(cls, first_frame, wait_label="Uploading first frame.")
content: list[TaskTextContent | TaskImageContent] = [
TaskTextContent(text=model["prompt"]),
TaskImageContent(
image_url=TaskImageContentUrl(
url=await upload_image_to_comfyapi(cls, first_frame, wait_label="Uploading first frame.")
),
image_url=TaskImageContentUrl(url=first_frame_url),
role="first_frame",
),
]
if last_frame is not None:
if last_frame_asset_id:
content.append(
TaskImageContent(
image_url=TaskImageContentUrl(url=image_assets[last_frame_asset_id]),
role="last_frame",
),
)
elif last_frame is not None:
content.append(
TaskImageContent(
image_url=TaskImageContentUrl(
@ -1385,6 +1605,24 @@ def _seedance2_reference_inputs(resolutions: list[str]):
tooltip="Automatically downscale reference videos that exceed the model's pixel budget "
"for the selected resolution. Aspect ratio is preserved; videos already within limits are untouched.",
),
IO.Autogrow.Input(
"reference_assets",
template=IO.Autogrow.TemplateNames(
IO.String.Input("reference_asset"),
names=[
"asset_1",
"asset_2",
"asset_3",
"asset_4",
"asset_5",
"asset_6",
"asset_7",
"asset_8",
"asset_9",
],
min=0,
),
),
]
@ -1486,24 +1724,42 @@ class ByteDance2ReferenceNode(IO.ComfyNode):
reference_images = model.get("reference_images", {})
reference_videos = model.get("reference_videos", {})
reference_audios = model.get("reference_audios", {})
reference_assets = model.get("reference_assets", {})
if not reference_images and not reference_videos:
raise ValueError("At least one reference image or video is required.")
reference_image_assets, reference_video_assets, reference_audio_assets = await _resolve_reference_assets(
cls, list(reference_assets.values())
)
if not reference_images and not reference_videos and not reference_image_assets and not reference_video_assets:
raise ValueError("At least one reference image or video or asset is required.")
total_images = len(reference_images) + len(reference_image_assets)
if total_images > 9:
raise ValueError(
f"Too many reference images: {total_images} "
f"(images={len(reference_images)}, image assets={len(reference_image_assets)}). Maximum is 9."
)
total_videos = len(reference_videos) + len(reference_video_assets)
if total_videos > 3:
raise ValueError(
f"Too many reference videos: {total_videos} "
f"(videos={len(reference_videos)}, video assets={len(reference_video_assets)}). Maximum is 3."
)
total_audios = len(reference_audios) + len(reference_audio_assets)
if total_audios > 3:
raise ValueError(
f"Too many reference audios: {total_audios} "
f"(audios={len(reference_audios)}, audio assets={len(reference_audio_assets)}). Maximum is 3."
)
model_id = SEEDANCE_MODELS[model["model"]]
has_video_input = len(reference_videos) > 0
has_video_input = total_videos > 0
if model.get("auto_downscale") and reference_videos:
max_px = (
SEEDANCE2_REF_VIDEO_PIXEL_LIMITS.get(model_id, {})
.get(model["resolution"], {})
.get("max")
)
max_px = SEEDANCE2_REF_VIDEO_PIXEL_LIMITS.get(model_id, {}).get(model["resolution"], {}).get("max")
if max_px:
for key in reference_videos:
reference_videos[key] = resize_video_to_pixel_budget(
reference_videos[key], max_px
)
reference_videos[key] = resize_video_to_pixel_budget(reference_videos[key], max_px)
total_video_duration = 0.0
for i, key in enumerate(reference_videos, 1):
@ -1531,8 +1787,19 @@ class ByteDance2ReferenceNode(IO.ComfyNode):
if total_audio_duration > 15.1:
raise ValueError(f"Total reference audio duration is {total_audio_duration:.1f}s. Maximum is 15.1 seconds.")
asset_labels = _build_asset_labels(
reference_assets,
reference_image_assets,
reference_video_assets,
reference_audio_assets,
len(reference_images),
len(reference_videos),
len(reference_audios),
)
prompt_text = _rewrite_asset_refs(model["prompt"], asset_labels)
content: list[TaskTextContent | TaskImageContent | TaskVideoContent | TaskAudioContent] = [
TaskTextContent(text=model["prompt"]),
TaskTextContent(text=prompt_text),
]
for i, key in enumerate(reference_images, 1):
content.append(
@ -1573,6 +1840,21 @@ class ByteDance2ReferenceNode(IO.ComfyNode):
),
),
)
for url in reference_image_assets.values():
content.append(
TaskImageContent(
image_url=TaskImageContentUrl(url=url),
role="reference_image",
),
)
for url in reference_video_assets.values():
content.append(
TaskVideoContent(video_url=TaskVideoContentUrl(url=url)),
)
for url in reference_audio_assets.values():
content.append(
TaskAudioContent(audio_url=TaskAudioContentUrl(url=url)),
)
initial_response = await sync_op(
cls,
ApiEndpoint(path=BYTEPLUS_TASK_ENDPOINT, method="POST"),
@ -1627,6 +1909,156 @@ async def process_video_task(
return IO.NodeOutput(await download_url_to_video_output(response.content.video_url))
class ByteDanceCreateImageAsset(IO.ComfyNode):
    """Node that uploads an image and registers it as a Seedance 2.0 personal asset."""

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="ByteDanceCreateImageAsset",
            display_name="ByteDance Create Image Asset",
            category="api node/image/ByteDance",
            description=(
                "Create a Seedance 2.0 personal image asset. Uploads the input image and "
                "registers it in the given asset group. If group_id is empty, runs a real-person "
                "H5 authentication flow to create a new group before adding the asset."
            ),
            inputs=[
                IO.Image.Input("image", tooltip="Image to register as a personal asset."),
                IO.String.Input(
                    "group_id",
                    default="",
                    tooltip="Reuse an existing Seedance asset group ID to skip repeated human verification for the "
                    "same person. Leave empty to run real-person authentication in the browser and create a new group.",
                ),
                # NOTE(review): optional "name" input is staged but not yet enabled.
                # IO.String.Input(
                #     "name",
                #     default="",
                #     tooltip="Asset name (up to 64 characters).",
                # ),
            ],
            outputs=[
                IO.String.Output(display_name="asset_id"),
                IO.String.Output(display_name="group_id"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            # is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        image: Input.Image,
        group_id: str = "",
        # name: str = "",
    ) -> IO.NodeOutput:
        # if len(name) > 64:
        #     raise ValueError("Name of asset can not be greater then 64 symbols")
        # Seedance image-asset constraints: 300-6000 px per side, W/H ratio in [0.4, 2.5].
        validate_image_dimensions(image, min_width=300, max_width=6000, min_height=300, max_height=6000)
        validate_image_aspect_ratio(image, min_ratio=(0.4, 1), max_ratio=(2.5, 1))
        resolved_group = await _resolve_group_id(cls, group_id)
        asset_id = await _create_seedance_asset(
            cls,
            group_id=resolved_group,
            url=await upload_image_to_comfyapi(cls, image),
            name="",
            asset_type="Image",
        )
        # The asset must finish server-side pre-processing before it is usable.
        await _wait_for_asset_active(cls, asset_id, resolved_group)
        PromptServer.instance.send_progress_text(
            f"Please save the asset_id and group_id for reuse.\n\nasset_id: {asset_id}\n\n"
            f"group_id: {resolved_group}",
            cls.hidden.unique_id,
        )
        return IO.NodeOutput(asset_id, resolved_group)
class ByteDanceCreateVideoAsset(IO.ComfyNode):
    """Node that uploads a video and registers it as a Seedance 2.0 personal asset."""

    @classmethod
    def define_schema(cls) -> IO.Schema:
        return IO.Schema(
            node_id="ByteDanceCreateVideoAsset",
            display_name="ByteDance Create Video Asset",
            category="api node/video/ByteDance",
            description=(
                "Create a Seedance 2.0 personal video asset. Uploads the input video and "
                "registers it in the given asset group. If group_id is empty, runs a real-person "
                "H5 authentication flow to create a new group before adding the asset."
            ),
            inputs=[
                IO.Video.Input("video", tooltip="Video to register as a personal asset."),
                IO.String.Input(
                    "group_id",
                    default="",
                    tooltip="Reuse an existing Seedance asset group ID to skip repeated human verification for the "
                    "same person. Leave empty to run real-person authentication in the browser and create a new group.",
                ),
                # NOTE(review): optional "name" input is staged but not yet enabled.
                # IO.String.Input(
                #     "name",
                #     default="",
                #     tooltip="Asset name (up to 64 characters).",
                # ),
            ],
            outputs=[
                IO.String.Output(display_name="asset_id"),
                IO.String.Output(display_name="group_id"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            # is_api_node=True,
        )

    @classmethod
    async def execute(
        cls,
        video: Input.Video,
        group_id: str = "",
        # name: str = "",
    ) -> IO.NodeOutput:
        # if len(name) > 64:
        #     raise ValueError("Name of asset can not be greater then 64 symbols")
        # Seedance video-asset constraints: 2-15s duration, 300-6000 px per side,
        # W/H ratio in [0.4, 2.5], total pixels in [409600, 927408], FPS in [24, 60].
        validate_video_duration(video, min_duration=2, max_duration=15)
        validate_video_dimensions(video, min_width=300, max_width=6000, min_height=300, max_height=6000)
        w, h = video.get_dimensions()
        if h > 0:  # guard against division by zero on degenerate dimensions
            ratio = w / h
            if not (0.4 <= ratio <= 2.5):
                raise ValueError(f"Asset video aspect ratio (W/H) must be in [0.4, 2.5], got {ratio:.3f} ({w}x{h}).")
        pixels = w * h
        if not (409_600 <= pixels <= 927_408):
            raise ValueError(
                f"Asset video total pixels (W×H) must be in [409600, 927408], " f"got {pixels:,} ({w}x{h})."
            )
        fps = float(video.get_frame_rate())
        if not (24 <= fps <= 60):
            raise ValueError(f"Asset video FPS must be in [24, 60], got {fps:.2f}.")
        resolved_group = await _resolve_group_id(cls, group_id)
        asset_id = await _create_seedance_asset(
            cls,
            group_id=resolved_group,
            url=await upload_video_to_comfyapi(cls, video),
            name="",
            asset_type="Video",
        )
        # The asset must finish server-side pre-processing before it is usable.
        await _wait_for_asset_active(cls, asset_id, resolved_group)
        PromptServer.instance.send_progress_text(
            f"Please save the asset_id and group_id for reuse.\n\nasset_id: {asset_id}\n\n"
            f"group_id: {resolved_group}",
            cls.hidden.unique_id,
        )
        return IO.NodeOutput(asset_id, resolved_group)
class ByteDanceExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@ -1640,6 +2072,8 @@ class ByteDanceExtension(ComfyExtension):
ByteDance2TextToVideoNode,
ByteDance2FirstLastFrameNode,
ByteDance2ReferenceNode,
ByteDanceCreateImageAsset,
ByteDanceCreateVideoAsset,
]

View File

@ -156,6 +156,7 @@ async def poll_op(
estimated_duration: int | None = None,
cancel_endpoint: ApiEndpoint | None = None,
cancel_timeout: float = 10.0,
extra_text: str | None = None,
) -> M:
raw = await poll_op_raw(
cls,
@ -176,6 +177,7 @@ async def poll_op(
estimated_duration=estimated_duration,
cancel_endpoint=cancel_endpoint,
cancel_timeout=cancel_timeout,
extra_text=extra_text,
)
if not isinstance(raw, dict):
raise Exception("Expected JSON response to validate into a Pydantic model, got non-JSON (binary or text).")
@ -260,6 +262,7 @@ async def poll_op_raw(
estimated_duration: int | None = None,
cancel_endpoint: ApiEndpoint | None = None,
cancel_timeout: float = 10.0,
extra_text: str | None = None,
) -> dict[str, Any]:
"""
Polls an endpoint until the task reaches a terminal state. Displays time while queued/processing,
@ -299,6 +302,7 @@ async def poll_op_raw(
price=state.price,
is_queued=state.is_queued,
processing_elapsed_seconds=int(proc_elapsed),
extra_text=extra_text,
)
await asyncio.sleep(1.0)
except Exception as exc:
@ -389,6 +393,7 @@ async def poll_op_raw(
price=state.price,
is_queued=False,
processing_elapsed_seconds=int(state.base_processing_elapsed),
extra_text=extra_text,
)
return resp_json
@ -462,6 +467,7 @@ def _display_time_progress(
price: float | None = None,
is_queued: bool | None = None,
processing_elapsed_seconds: int | None = None,
extra_text: str | None = None,
) -> None:
if estimated_total is not None and estimated_total > 0 and is_queued is False:
pe = processing_elapsed_seconds if processing_elapsed_seconds is not None else elapsed_seconds
@ -469,7 +475,8 @@ def _display_time_progress(
time_line = f"Time elapsed: {int(elapsed_seconds)}s (~{remaining}s remaining)"
else:
time_line = f"Time elapsed: {int(elapsed_seconds)}s"
_display_text(node_cls, time_line, status=status, price=price)
text = f"{time_line}\n\n{extra_text}" if extra_text else time_line
_display_text(node_cls, text, status=status, price=price)
async def _diagnose_connectivity() -> dict[str, bool]: