Compare commits

...

4 Commits

Sipovec | 91c6ac6268 | 2026-01-25 02:44:53 +00:00
    Merge 5ddc0c1245 into ed6002cb60

pythongosssss | ed6002cb60 | 2026-01-24 17:30:40 -08:00
    add support for kwargs inputs to allow arbitrary inputs from frontend (#12063)
    used to output selected combo index
    Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>

Alexander Piskun | bc72d7f8d1 | 2026-01-24 17:10:09 -08:00
    [API Nodes] add TencentHunyuan3D nodes (#12026)
    * feat(api-nodes): add TencentHunyuan3D nodes
    * add "(Pro)" to display name
    ---------
    Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>

comfyanonymous | aef4e13588 | 2026-01-24 19:23:20 -05:00
    Make empty latent node work with other models. (#12062)
13 changed files with 429 additions and 10 deletions

View File

@@ -594,6 +594,7 @@ class Wan22(Wan21):
class HunyuanImage21(LatentFormat):
    latent_channels = 64
    latent_dimensions = 2
    spacial_downscale_ratio = 32
    scale_factor = 0.75289
    latent_rgb_factors = [
@@ -727,6 +728,7 @@ class HunyuanVideo15(LatentFormat):
    latent_rgb_factors_bias = [ 0.0456, -0.0202, -0.0644]
    latent_channels = 32
    latent_dimensions = 3
    spacial_downscale_ratio = 16
    scale_factor = 1.03682
    taesd_decoder_name = "lighttaehy1_5"
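The new spacial_downscale_ratio constants record how many image pixels map to one latent cell along each spatial axis, which is presumably what the "Make empty latent node work with other models" commit needs in order to size latents generically. A rough sketch of that idea (illustrative only; the function below is not part of this changeset):

# Illustrative sketch only; the function and argument names are invented.
import torch

def make_empty_latent(latent_format, width: int, height: int, batch_size: int = 1) -> torch.Tensor:
    r = latent_format.spacial_downscale_ratio   # e.g. 32 for HunyuanImage21, 16 for HunyuanVideo15
    c = latent_format.latent_channels           # e.g. 64 or 32
    # A 1024x1024 request with ratio 32 becomes a 64-channel 32x32 latent.
    return torch.zeros(batch_size, c, height // r, width // r)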

View File

@@ -1383,6 +1383,8 @@ class Schema:
    """Flags a node as not idempotent; when True, the node will run and not reuse the cached outputs when identical inputs are provided on a different node in the graph."""
    enable_expand: bool=False
    """Flags a node as expandable, allowing NodeOutput to include 'expand' property."""
    accept_all_inputs: bool=False
    """When True, all inputs from the prompt will be passed to the node as kwargs, even if not defined in the schema."""
    def validate(self):
        '''Validate the schema:
@@ -1853,6 +1855,14 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
            cls.GET_SCHEMA()
        return cls._NOT_IDEMPOTENT

    _ACCEPT_ALL_INPUTS = None
    @final
    @classproperty
    def ACCEPT_ALL_INPUTS(cls):  # noqa
        if cls._ACCEPT_ALL_INPUTS is None:
            cls.GET_SCHEMA()
        return cls._ACCEPT_ALL_INPUTS

    @final
    @classmethod
    def INPUT_TYPES(cls) -> dict[str, dict]:
@@ -1891,6 +1901,8 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
        cls._INPUT_IS_LIST = schema.is_input_list
        if cls._NOT_IDEMPOTENT is None:
            cls._NOT_IDEMPOTENT = schema.not_idempotent
        if cls._ACCEPT_ALL_INPUTS is None:
            cls._ACCEPT_ALL_INPUTS = schema.accept_all_inputs
        if cls._RETURN_TYPES is None:
            output = []
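Taken together, the accept_all_inputs schema flag, the cached ACCEPT_ALL_INPUTS classproperty, and the execution.py change at the end of this diff let a V3 node receive prompt inputs it never declared. A minimal sketch of how a node might opt in (the node and widget names are hypothetical; only accept_all_inputs=True and the **kwargs delivery come from this changeset):

from comfy_api.latest import IO

class EchoExtrasNode(IO.ComfyNode):  # hypothetical example node
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="EchoExtrasNode",
            display_name="Echo Extras",
            category="utils",
            inputs=[IO.String.Input("label")],   # the only declared input
            outputs=[IO.String.Output()],
            accept_all_inputs=True,              # undeclared frontend-defined inputs still arrive
        )

    @classmethod
    def execute(cls, label: str, **kwargs) -> IO.NodeOutput:
        # kwargs carries any prompt inputs that are not part of the schema
        return IO.NodeOutput(f"{label}: {sorted(kwargs)}")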

View File

@@ -0,0 +1,66 @@
from typing import TypedDict

from pydantic import BaseModel, Field, model_validator


class InputGenerateType(TypedDict):
    generate_type: str
    polygon_type: str
    pbr: bool


class Hunyuan3DViewImage(BaseModel):
    ViewType: str = Field(..., description="Valid values: back, left, right.")
    ViewImageUrl: str = Field(...)


class To3DProTaskRequest(BaseModel):
    Model: str = Field(...)
    Prompt: str | None = Field(None)
    ImageUrl: str | None = Field(None)
    MultiViewImages: list[Hunyuan3DViewImage] | None = Field(None)
    EnablePBR: bool | None = Field(...)
    FaceCount: int | None = Field(...)
    GenerateType: str | None = Field(...)
    PolygonType: str | None = Field(...)


class RequestError(BaseModel):
    Code: str = Field("")
    Message: str = Field("")


class To3DProTaskCreateResponse(BaseModel):
    JobId: str | None = Field(None)
    Error: RequestError | None = Field(None)

    @model_validator(mode="before")
    @classmethod
    def unwrap_data(cls, values: dict) -> dict:
        if "Response" in values and isinstance(values["Response"], dict):
            return values["Response"]
        return values


class ResultFile3D(BaseModel):
    Type: str = Field(...)
    Url: str = Field(...)
    PreviewImageUrl: str = Field("")


class To3DProTaskResultResponse(BaseModel):
    ErrorCode: str = Field("")
    ErrorMessage: str = Field("")
    ResultFile3Ds: list[ResultFile3D] = Field([])
    Status: str = Field(...)

    @model_validator(mode="before")
    @classmethod
    def unwrap_data(cls, values: dict) -> dict:
        if "Response" in values and isinstance(values["Response"], dict):
            return values["Response"]
        return values


class To3DProTaskQueryRequest(BaseModel):
    JobId: str = Field(...)
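The mode="before" validators let these response models accept either the raw proxy payload, which nests fields under a "Response" key, or an already-flattened dict. A quick illustration using the model defined above (the JobId value is invented):

wrapped = {"Response": {"JobId": "job-123"}}   # shape returned by the API proxy
flat = {"JobId": "job-123"}

assert To3DProTaskCreateResponse.model_validate(wrapped).JobId == "job-123"
assert To3DProTaskCreateResponse.model_validate(flat).JobId == "job-123"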

View File

@@ -0,0 +1,297 @@
import os

from typing_extensions import override

from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.hunyuan3d import (
    Hunyuan3DViewImage,
    InputGenerateType,
    ResultFile3D,
    To3DProTaskCreateResponse,
    To3DProTaskQueryRequest,
    To3DProTaskRequest,
    To3DProTaskResultResponse,
)
from comfy_api_nodes.util import (
    ApiEndpoint,
    download_url_to_bytesio,
    downscale_image_tensor_by_max_side,
    poll_op,
    sync_op,
    upload_image_to_comfyapi,
    validate_image_dimensions,
    validate_string,
)
from folder_paths import get_output_directory


def get_glb_obj_from_response(response_objs: list[ResultFile3D]) -> ResultFile3D:
    for i in response_objs:
        if i.Type.lower() == "glb":
            return i
    raise ValueError("No GLB file found in response. Please report this to the developers.")


class TencentTextToModelNode(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentTextToModelNode",
            display_name="Hunyuan3D: Text to Model (Pro)",
            category="api node/3d/Tencent",
            inputs=[
                IO.Combo.Input(
                    "model",
                    options=["3.0", "3.1"],
                    tooltip="The LowPoly option is unavailable for the `3.1` model.",
                ),
                IO.String.Input("prompt", multiline=True, default="", tooltip="Supports up to 1024 characters."),
                IO.Int.Input("face_count", default=500000, min=40000, max=1500000),
                IO.DynamicCombo.Input(
                    "generate_type",
                    options=[
                        IO.DynamicCombo.Option("Normal", [IO.Boolean.Input("pbr", default=False)]),
                        IO.DynamicCombo.Option(
                            "LowPoly",
                            [
                                IO.Combo.Input("polygon_type", options=["triangle", "quadrilateral"]),
                                IO.Boolean.Input("pbr", default=False),
                            ],
                        ),
                        IO.DynamicCombo.Option("Geometry", []),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.String.Output(display_name="model_file"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            is_output_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["generate_type", "generate_type.pbr", "face_count"]),
                expr="""
                (
                    $base := widgets.generate_type = "normal" ? 25 : widgets.generate_type = "lowpoly" ? 30 : 15;
                    $pbr := $lookup(widgets, "generate_type.pbr") ? 10 : 0;
                    $face := widgets.face_count != 500000 ? 10 : 0;
                    {"type":"usd","usd": ($base + $pbr + $face) * 0.02}
                )
                """
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        prompt: str,
        face_count: int,
        generate_type: InputGenerateType,
        seed: int,
    ) -> IO.NodeOutput:
        _ = seed
        validate_string(prompt, field_name="prompt", min_length=1, max_length=1024)
        if model == "3.1" and generate_type["generate_type"].lower() == "lowpoly":
            raise ValueError("The LowPoly option is currently unavailable for the 3.1 model.")
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DProTaskRequest(
                Model=model,
                Prompt=prompt,
                FaceCount=face_count,
                GenerateType=generate_type["generate_type"],
                EnablePBR=generate_type.get("pbr", None),
                PolygonType=generate_type.get("polygon_type", None),
            ),
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        model_file = f"hunyuan_model_{response.JobId}.glb"
        await download_url_to_bytesio(
            get_glb_obj_from_response(result.ResultFile3Ds).Url,
            os.path.join(get_output_directory(), model_file),
        )
        return IO.NodeOutput(model_file)


class TencentImageToModelNode(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="TencentImageToModelNode",
            display_name="Hunyuan3D: Image(s) to Model (Pro)",
            category="api node/3d/Tencent",
            inputs=[
                IO.Combo.Input(
                    "model",
                    options=["3.0", "3.1"],
                    tooltip="The LowPoly option is unavailable for the `3.1` model.",
                ),
                IO.Image.Input("image"),
                IO.Image.Input("image_left", optional=True),
                IO.Image.Input("image_right", optional=True),
                IO.Image.Input("image_back", optional=True),
                IO.Int.Input("face_count", default=500000, min=40000, max=1500000),
                IO.DynamicCombo.Input(
                    "generate_type",
                    options=[
                        IO.DynamicCombo.Option("Normal", [IO.Boolean.Input("pbr", default=False)]),
                        IO.DynamicCombo.Option(
                            "LowPoly",
                            [
                                IO.Combo.Input("polygon_type", options=["triangle", "quadrilateral"]),
                                IO.Boolean.Input("pbr", default=False),
                            ],
                        ),
                        IO.DynamicCombo.Option("Geometry", []),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed controls whether the node should re-run; "
                    "results are non-deterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.String.Output(display_name="model_file"),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            is_output_node=True,
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(
                    widgets=["generate_type", "generate_type.pbr", "face_count"],
                    inputs=["image_left", "image_right", "image_back"],
                ),
                expr="""
                (
                    $base := widgets.generate_type = "normal" ? 25 : widgets.generate_type = "lowpoly" ? 30 : 15;
                    $multiview := (
                        inputs.image_left.connected or inputs.image_right.connected or inputs.image_back.connected
                    ) ? 10 : 0;
                    $pbr := $lookup(widgets, "generate_type.pbr") ? 10 : 0;
                    $face := widgets.face_count != 500000 ? 10 : 0;
                    {"type":"usd","usd": ($base + $multiview + $pbr + $face) * 0.02}
                )
                """
            ),
        )

    @classmethod
    async def execute(
        cls,
        model: str,
        image: Input.Image,
        face_count: int,
        generate_type: InputGenerateType,
        seed: int,
        image_left: Input.Image | None = None,
        image_right: Input.Image | None = None,
        image_back: Input.Image | None = None,
    ) -> IO.NodeOutput:
        _ = seed
        if model == "3.1" and generate_type["generate_type"].lower() == "lowpoly":
            raise ValueError("The LowPoly option is currently unavailable for the 3.1 model.")
        validate_image_dimensions(image, min_width=128, min_height=128)
        multiview_images = []
        for k, v in {
            "left": image_left,
            "right": image_right,
            "back": image_back,
        }.items():
            if v is None:
                continue
            validate_image_dimensions(v, min_width=128, min_height=128)
            multiview_images.append(
                Hunyuan3DViewImage(
                    ViewType=k,
                    ViewImageUrl=await upload_image_to_comfyapi(
                        cls,
                        downscale_image_tensor_by_max_side(v, max_side=4900),
                        mime_type="image/webp",
                        total_pixels=24_010_000,
                    ),
                )
            )
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro", method="POST"),
            response_model=To3DProTaskCreateResponse,
            data=To3DProTaskRequest(
                Model=model,
                FaceCount=face_count,
                GenerateType=generate_type["generate_type"],
                ImageUrl=await upload_image_to_comfyapi(
                    cls,
                    downscale_image_tensor_by_max_side(image, max_side=4900),
                    mime_type="image/webp",
                    total_pixels=24_010_000,
                ),
                MultiViewImages=multiview_images if multiview_images else None,
                EnablePBR=generate_type.get("pbr", None),
                PolygonType=generate_type.get("polygon_type", None),
            ),
        )
        if response.Error:
            raise ValueError(f"Task creation failed with code {response.Error.Code}: {response.Error.Message}")
        result = await poll_op(
            cls,
            ApiEndpoint(path="/proxy/tencent/hunyuan/3d-pro/query", method="POST"),
            data=To3DProTaskQueryRequest(JobId=response.JobId),
            response_model=To3DProTaskResultResponse,
            status_extractor=lambda r: r.Status,
        )
        model_file = f"hunyuan_model_{response.JobId}.glb"
        await download_url_to_bytesio(
            get_glb_obj_from_response(result.ResultFile3Ds).Url,
            os.path.join(get_output_directory(), model_file),
        )
        return IO.NodeOutput(model_file)


class TencentHunyuan3DExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            TencentTextToModelNode,
            TencentImageToModelNode,
        ]


async def comfy_entrypoint() -> TencentHunyuan3DExtension:
    return TencentHunyuan3DExtension()
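Both price_badge expressions follow the same scheme, assuming they evaluate as written: a base amount chosen by generate_type (25 for Normal, 30 for LowPoly, 15 for Geometry), plus 10 when PBR is enabled, plus 10 when face_count differs from the 500000 default, plus 10 in the image node when any extra view is connected, all multiplied by 0.02 to get USD. Two worked examples: LowPoly with PBR and a custom face_count prices at (30 + 10 + 10) * 0.02 = 1.00 USD; Normal with default settings and one extra view connected prices at (25 + 10) * 0.02 = 0.70 USD.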

View File

@@ -249,7 +249,6 @@ async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusRe
        ApiEndpoint(path=f"/proxy/kling/v1/videos/omni-video/{response.data.task_id}"),
        response_model=TaskStatusResponse,
        status_extractor=lambda r: (r.data.task_status if r.data else None),
        max_poll_attempts=160,
    )
    return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url))

View File

@@ -149,7 +149,6 @@ class OpenAIVideoSora2(IO.ComfyNode):
            response_model=Sora2GenerationResponse,
            status_extractor=lambda x: x.status,
            poll_interval=8.0,
            max_poll_attempts=160,
            estimated_duration=int(45 * (duration / 4) * model_time_multiplier),
        )
        return IO.NodeOutput(

View File

@@ -203,7 +203,6 @@ class TopazImageEnhance(IO.ComfyNode):
            progress_extractor=lambda x: getattr(x, "progress", 0),
            price_extractor=lambda x: x.credits * 0.08,
            poll_interval=8.0,
            max_poll_attempts=160,
            estimated_duration=60,
        )

View File

@@ -13,6 +13,7 @@ from .conversions import (
    bytesio_to_image_tensor,
    convert_mask_to_image,
    downscale_image_tensor,
    downscale_image_tensor_by_max_side,
    image_tensor_pair_to_batch,
    pil_to_bytesio,
    resize_mask_to_image,
@@ -33,6 +34,7 @@ from .download_helpers import (
from .upload_helpers import (
    upload_audio_to_comfyapi,
    upload_file_to_comfyapi,
    upload_image_to_comfyapi,
    upload_images_to_comfyapi,
    upload_video_to_comfyapi,
)
@@ -61,6 +63,7 @@ __all__ = [
    # Upload helpers
    "upload_audio_to_comfyapi",
    "upload_file_to_comfyapi",
    "upload_image_to_comfyapi",
    "upload_images_to_comfyapi",
    "upload_video_to_comfyapi",
    # Download helpers
@@ -75,6 +78,7 @@ __all__ = [
    "bytesio_to_image_tensor",
    "convert_mask_to_image",
    "downscale_image_tensor",
    "downscale_image_tensor_by_max_side",
    "image_tensor_pair_to_batch",
    "pil_to_bytesio",
    "resize_mask_to_image",

View File

@@ -141,7 +141,7 @@ async def poll_op(
    queued_statuses: list[str | int] | None = None,
    data: BaseModel | None = None,
    poll_interval: float = 5.0,
    max_poll_attempts: int = 120,
    max_poll_attempts: int = 160,
    timeout_per_poll: float = 120.0,
    max_retries_per_poll: int = 3,
    retry_delay_per_poll: float = 1.0,
@@ -238,7 +238,7 @@ async def poll_op_raw(
    queued_statuses: list[str | int] | None = None,
    data: dict[str, Any] | BaseModel | None = None,
    poll_interval: float = 5.0,
    max_poll_attempts: int = 120,
    max_poll_attempts: int = 160,
    timeout_per_poll: float = 120.0,
    max_retries_per_poll: int = 3,
    retry_delay_per_poll: float = 1.0,
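Raising the default max_poll_attempts from 120 to 160 is what makes the explicit max_poll_attempts=160 overrides in the Kling, Sora 2, and Topaz call sites above redundant. As a rough upper bound on polling time, ignoring per-request latency and retries: 160 attempts * 5.0 s default poll_interval = 800 s, about 13 minutes; 160 attempts * 8.0 s (the Sora 2 and Topaz calls) = 1280 s, about 21 minutes.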

View File

@@ -144,6 +144,21 @@ def downscale_image_tensor(image: torch.Tensor, total_pixels: int = 1536 * 1024)
    return s


def downscale_image_tensor_by_max_side(image: torch.Tensor, *, max_side: int) -> torch.Tensor:
    """Downscale input image tensor so the largest dimension is at most max_side pixels."""
    samples = image.movedim(-1, 1)
    height, width = samples.shape[2], samples.shape[3]
    max_dim = max(width, height)
    if max_dim <= max_side:
        return image
    scale_by = max_side / max_dim
    new_width = round(width * scale_by)
    new_height = round(height * scale_by)
    s = common_upscale(samples, new_width, new_height, "lanczos", "disabled")
    s = s.movedim(1, -1)
    return s


def tensor_to_data_uri(
    image_tensor: torch.Tensor,
    total_pixels: int = 2048 * 2048,
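A short usage sketch for the new helper (assuming ComfyUI's usual [batch, height, width, channels] image layout; the sizes below are invented):

import torch

img = torch.rand(1, 3000, 6000, 3)                            # 6000x3000 example image
out = downscale_image_tensor_by_max_side(img, max_side=4900)  # -> shape (1, 2450, 4900, 3), aspect preserved

small = torch.rand(1, 512, 512, 3)
assert downscale_image_tensor_by_max_side(small, max_side=4900) is small  # returned unchanged when already small enough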

View File

@@ -88,6 +88,28 @@ async def upload_images_to_comfyapi(
    return download_urls


async def upload_image_to_comfyapi(
    cls: type[IO.ComfyNode],
    image: torch.Tensor,
    *,
    mime_type: str | None = None,
    wait_label: str | None = "Uploading",
    total_pixels: int = 2048 * 2048,
) -> str:
    """Uploads a single image to ComfyUI API and returns its download URL."""
    return (
        await upload_images_to_comfyapi(
            cls,
            image,
            max_images=1,
            mime_type=mime_type,
            wait_label=wait_label,
            show_batch_index=False,
            total_pixels=total_pixels,
        )
    )[0]


async def upload_audio_to_comfyapi(
    cls: type[IO.ComfyNode],
    audio: Input.Audio,

View File

@@ -104,19 +104,23 @@ class CustomComboNode(io.ComfyNode):
            category="utils",
            is_experimental=True,
            inputs=[io.Combo.Input("choice", options=[])],
            outputs=[io.String.Output()]
            outputs=[
                io.String.Output(display_name="STRING"),
                io.Int.Output(display_name="INDEX"),
            ],
            accept_all_inputs=True,
        )

    @classmethod
    def validate_inputs(cls, choice: io.Combo.Type) -> bool:
    def validate_inputs(cls, choice: io.Combo.Type, index: int = 0, **kwargs) -> bool:
        # NOTE: DO NOT DO THIS unless you want to skip validation entirely on the node's inputs.
        # I am doing that here because the widgets (besides the combo dropdown) on this node are fully frontend defined.
        # I need to skip checking that the chosen combo option is in the options list, since those are defined by the user.
        return True

    @classmethod
    def execute(cls, choice: io.Combo.Type) -> io.NodeOutput:
        return io.NodeOutput(choice)
    def execute(cls, choice: io.Combo.Type, index: int = 0, **kwargs) -> io.NodeOutput:
        return io.NodeOutput(choice, index)


class DCTestNode(io.ComfyNode):
View File

@@ -175,7 +175,7 @@ def get_input_data(inputs, class_def, unique_id, execution_list=None, dynprompt=
                continue
            obj = cached.outputs[output_index]
            input_data_all[x] = obj
        elif input_category is not None:
        elif input_category is not None or (is_v3 and class_def.ACCEPT_ALL_INPUTS):
            input_data_all[x] = [input_data]
    if is_v3:
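This is the execution-side half of accept_all_inputs: previously a prompt input with no matching schema entry (input_category is None) was dropped, whereas now, for a V3 class whose ACCEPT_ALL_INPUTS resolves to True, the raw value is kept and later delivered to the node, which is how the **kwargs in the examples above get populated. A condensed paraphrase of the changed branch, not the full get_input_data logic:

        elif input_category is not None or (is_v3 and class_def.ACCEPT_ALL_INPUTS):
            input_data_all[x] = [input_data]  # undeclared inputs now survive for accept-all V3 nodes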