From d1504404662dfce6e401422701c2a7e24057b1b5 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 14 Jan 2026 10:54:50 -0800
Subject: [PATCH 01/10] Fix VAELoader (#11880)
---
nodes.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/nodes.py b/nodes.py
index 90c5f2a6e..aa8572446 100644
--- a/nodes.py
+++ b/nodes.py
@@ -788,6 +788,7 @@ class VAELoader:
#TODO: scale factor?
def load_vae(self, vae_name):
+ metadata = None
if vae_name == "pixel_space":
sd = {}
sd["pixel_space_vae"] = torch.tensor(1.0)
From 07f2462eae7fa2daa34971dd1b15fd525686e958 Mon Sep 17 00:00:00 2001
From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com>
Date: Wed, 14 Jan 2026 21:25:38 +0200
Subject: [PATCH 02/10] feat(api-nodes): add Meshy 3D nodes (#11843)
* feat(api-nodes): add Meshy 3D nodes
* rebased, added JSONata price badges
---
comfy_api_nodes/apis/meshy.py | 160 +++++
comfy_api_nodes/nodes_meshy.py | 790 +++++++++++++++++++++++++
comfy_api_nodes/util/upload_helpers.py | 23 +-
nodes.py | 1 +
4 files changed, 969 insertions(+), 5 deletions(-)
create mode 100644 comfy_api_nodes/apis/meshy.py
create mode 100644 comfy_api_nodes/nodes_meshy.py
diff --git a/comfy_api_nodes/apis/meshy.py b/comfy_api_nodes/apis/meshy.py
new file mode 100644
index 000000000..be46d0d58
--- /dev/null
+++ b/comfy_api_nodes/apis/meshy.py
@@ -0,0 +1,160 @@
+from typing import TypedDict
+
+from pydantic import BaseModel, Field
+
+from comfy_api.latest import Input
+
+
+class InputShouldRemesh(TypedDict):
+ should_remesh: str
+ topology: str
+ target_polycount: int
+
+
+class InputShouldTexture(TypedDict):
+ should_texture: str
+ enable_pbr: bool
+ texture_prompt: str
+ texture_image: Input.Image | None
+
+
+class MeshyTaskResponse(BaseModel):
+ result: str = Field(...)
+
+
+class MeshyTextToModelRequest(BaseModel):
+ mode: str = Field("preview")
+ prompt: str = Field(..., max_length=600)
+ art_style: str = Field(..., description="'realistic' or 'sculpture'")
+ ai_model: str = Field(...)
+ topology: str | None = Field(..., description="'quad' or 'triangle'")
+ target_polycount: int | None = Field(..., ge=100, le=300000)
+ should_remesh: bool = Field(
+ True,
+ description="False returns the original mesh, ignoring topology and polycount.",
+ )
+ symmetry_mode: str = Field(..., description="'auto', 'off' or 'on'")
+ pose_mode: str = Field(...)
+ seed: int = Field(...)
+ moderation: bool = Field(False)
+
+
+class MeshyRefineTask(BaseModel):
+ mode: str = Field("refine")
+ preview_task_id: str = Field(...)
+ enable_pbr: bool | None = Field(...)
+ texture_prompt: str | None = Field(...)
+ texture_image_url: str | None = Field(...)
+ ai_model: str = Field(...)
+ moderation: bool = Field(False)
+
+
+class MeshyImageToModelRequest(BaseModel):
+ image_url: str = Field(...)
+ ai_model: str = Field(...)
+ topology: str | None = Field(..., description="'quad' or 'triangle'")
+ target_polycount: int | None = Field(..., ge=100, le=300000)
+ symmetry_mode: str = Field(..., description="'auto', 'off' or 'on'")
+ should_remesh: bool = Field(
+ True,
+ description="False returns the original mesh, ignoring topology and polycount.",
+ )
+ should_texture: bool = Field(...)
+ enable_pbr: bool | None = Field(...)
+ pose_mode: str = Field(...)
+ texture_prompt: str | None = Field(None, max_length=600)
+ texture_image_url: str | None = Field(None)
+ seed: int = Field(...)
+ moderation: bool = Field(False)
+
+
+class MeshyMultiImageToModelRequest(BaseModel):
+ image_urls: list[str] = Field(...)
+ ai_model: str = Field(...)
+ topology: str | None = Field(..., description="'quad' or 'triangle'")
+ target_polycount: int | None = Field(..., ge=100, le=300000)
+ symmetry_mode: str = Field(..., description="'auto', 'off' or 'on'")
+ should_remesh: bool = Field(
+ True,
+ description="False returns the original mesh, ignoring topology and polycount.",
+ )
+ should_texture: bool = Field(...)
+ enable_pbr: bool | None = Field(...)
+ pose_mode: str = Field(...)
+ texture_prompt: str | None = Field(None, max_length=600)
+ texture_image_url: str | None = Field(None)
+ seed: int = Field(...)
+ moderation: bool = Field(False)
+
+
+class MeshyRiggingRequest(BaseModel):
+ input_task_id: str = Field(...)
+ height_meters: float = Field(...)
+ texture_image_url: str | None = Field(...)
+
+
+class MeshyAnimationRequest(BaseModel):
+ rig_task_id: str = Field(...)
+ action_id: int = Field(...)
+
+
+class MeshyTextureRequest(BaseModel):
+ input_task_id: str = Field(...)
+ ai_model: str = Field(...)
+ enable_original_uv: bool = Field(...)
+ enable_pbr: bool = Field(...)
+ text_style_prompt: str | None = Field(...)
+ image_style_url: str | None = Field(...)
+
+
+class MeshyModelsUrls(BaseModel):
+ glb: str = Field("")
+
+
+class MeshyRiggedModelsUrls(BaseModel):
+ rigged_character_glb_url: str = Field("")
+
+
+class MeshyAnimatedModelsUrls(BaseModel):
+ animation_glb_url: str = Field("")
+
+
+class MeshyResultTextureUrls(BaseModel):
+ base_color: str = Field(...)
+ metallic: str | None = Field(None)
+ normal: str | None = Field(None)
+ roughness: str | None = Field(None)
+
+
+class MeshyTaskError(BaseModel):
+ message: str | None = Field(None)
+
+
+class MeshyModelResult(BaseModel):
+ id: str = Field(...)
+ type: str = Field(...)
+ model_urls: MeshyModelsUrls = Field(MeshyModelsUrls())
+ thumbnail_url: str = Field(...)
+ video_url: str | None = Field(None)
+ status: str = Field(...)
+ progress: int = Field(0)
+ texture_urls: list[MeshyResultTextureUrls] | None = Field([])
+ task_error: MeshyTaskError | None = Field(None)
+
+
+class MeshyRiggedResult(BaseModel):
+ id: str = Field(...)
+ type: str = Field(...)
+ status: str = Field(...)
+ progress: int = Field(0)
+ result: MeshyRiggedModelsUrls = Field(MeshyRiggedModelsUrls())
+ task_error: MeshyTaskError | None = Field(None)
+
+
+class MeshyAnimationResult(BaseModel):
+ id: str = Field(...)
+ type: str = Field(...)
+ status: str = Field(...)
+ progress: int = Field(0)
+ result: MeshyAnimatedModelsUrls = Field(MeshyAnimatedModelsUrls())
+ task_error: MeshyTaskError | None = Field(None)
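These pydantic models mirror Meshy's REST payloads; several fields are required but nullable, so callers pass None explicitly when a setting does not apply. A minimal construction sketch (values are illustrative, and pydantic v2's model_dump is assumed):

    from comfy_api_nodes.apis.meshy import MeshyTextToModelRequest

    req = MeshyTextToModelRequest(
        prompt="a weathered bronze statue of a fox",
        art_style="realistic",
        ai_model="latest",
        topology=None,            # required-but-nullable: None when should_remesh is False
        target_polycount=None,
        should_remesh=False,      # keep the original mesh, ignoring topology/polycount
        symmetry_mode="auto",
        pose_mode="",
        seed=0,
    )
    payload = req.model_dump()    # dict suitable for the POST body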
diff --git a/comfy_api_nodes/nodes_meshy.py b/comfy_api_nodes/nodes_meshy.py
new file mode 100644
index 000000000..740607983
--- /dev/null
+++ b/comfy_api_nodes/nodes_meshy.py
@@ -0,0 +1,790 @@
+import os
+
+from typing_extensions import override
+
+from comfy_api.latest import IO, ComfyExtension, Input
+from comfy_api_nodes.apis.meshy import (
+ InputShouldRemesh,
+ InputShouldTexture,
+ MeshyAnimationRequest,
+ MeshyAnimationResult,
+ MeshyImageToModelRequest,
+ MeshyModelResult,
+ MeshyMultiImageToModelRequest,
+ MeshyRefineTask,
+ MeshyRiggedResult,
+ MeshyRiggingRequest,
+ MeshyTaskResponse,
+ MeshyTextToModelRequest,
+ MeshyTextureRequest,
+)
+from comfy_api_nodes.util import (
+ ApiEndpoint,
+ download_url_to_bytesio,
+ poll_op,
+ sync_op,
+ upload_images_to_comfyapi,
+ validate_string,
+)
+from folder_paths import get_output_directory
+
+
+class MeshyTextToModelNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="MeshyTextToModelNode",
+ display_name="Meshy: Text to Model",
+ category="api node/3d/Meshy",
+ inputs=[
+ IO.Combo.Input("model", options=["latest"]),
+ IO.String.Input("prompt", multiline=True, default=""),
+ IO.Combo.Input("style", options=["realistic", "sculpture"]),
+ IO.DynamicCombo.Input(
+ "should_remesh",
+ options=[
+ IO.DynamicCombo.Option(
+ "true",
+ [
+ IO.Combo.Input("topology", options=["triangle", "quad"]),
+ IO.Int.Input(
+ "target_polycount",
+ default=300000,
+ min=100,
+ max=300000,
+ display_mode=IO.NumberDisplay.number,
+ ),
+ ],
+ ),
+ IO.DynamicCombo.Option("false", []),
+ ],
+ tooltip="When set to false, returns an unprocessed triangular mesh.",
+ ),
+ IO.Combo.Input("symmetry_mode", options=["auto", "on", "off"]),
+ IO.Combo.Input(
+ "pose_mode",
+ options=["", "A-pose", "T-pose"],
+ tooltip="Specify the pose mode for the generated model.",
+ ),
+ IO.Int.Input(
+ "seed",
+ default=0,
+ min=0,
+ max=2147483647,
+ display_mode=IO.NumberDisplay.number,
+ control_after_generate=True,
+ tooltip="Seed controls whether the node should re-run; "
+ "results are non-deterministic regardless of seed.",
+ ),
+ ],
+ outputs=[
+ IO.String.Output(display_name="model_file"),
+ IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ is_output_node=True,
+ price_badge=IO.PriceBadge(
+ expr="""{"type":"usd","usd":0.8}""",
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model: str,
+ prompt: str,
+ style: str,
+ should_remesh: InputShouldRemesh,
+ symmetry_mode: str,
+ pose_mode: str,
+ seed: int,
+ ) -> IO.NodeOutput:
+ validate_string(prompt, field_name="prompt", min_length=1, max_length=600)
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/meshy/openapi/v2/text-to-3d", method="POST"),
+ response_model=MeshyTaskResponse,
+ data=MeshyTextToModelRequest(
+ prompt=prompt,
+ art_style=style,
+ ai_model=model,
+ topology=should_remesh.get("topology", None),
+ target_polycount=should_remesh.get("target_polycount", None),
+ should_remesh=should_remesh["should_remesh"] == "true",
+ symmetry_mode=symmetry_mode,
+ pose_mode=pose_mode.lower(),
+ seed=seed,
+ ),
+ )
+ result = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/meshy/openapi/v2/text-to-3d/{response.result}"),
+ response_model=MeshyModelResult,
+ status_extractor=lambda r: r.status,
+ progress_extractor=lambda r: r.progress,
+ )
+ model_file = f"meshy_model_{response.result}.glb"
+ await download_url_to_bytesio(result.model_urls.glb, os.path.join(get_output_directory(), model_file))
+ return IO.NodeOutput(model_file, response.result)
+
+
+class MeshyRefineNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="MeshyRefineNode",
+ display_name="Meshy: Refine Draft Model",
+ category="api node/3d/Meshy",
+ description="Refine a previously created draft model.",
+ inputs=[
+ IO.Combo.Input("model", options=["latest"]),
+ IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
+ IO.Boolean.Input(
+ "enable_pbr",
+ default=False,
+ tooltip="Generate PBR Maps (metallic, roughness, normal) in addition to the base color. "
+ "Note: this should be set to false when using Sculpture style, "
+ "as Sculpture style generates its own set of PBR maps.",
+ ),
+ IO.String.Input(
+ "texture_prompt",
+ default="",
+ multiline=True,
+ tooltip="Provide a text prompt to guide the texturing process. "
+ "Maximum 600 characters. Cannot be used at the same time as 'texture_image'.",
+ ),
+ IO.Image.Input(
+ "texture_image",
+ tooltip="Only one of 'texture_image' or 'texture_prompt' may be used at the same time.",
+ optional=True,
+ ),
+ ],
+ outputs=[
+ IO.String.Output(display_name="model_file"),
+ IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ is_output_node=True,
+ price_badge=IO.PriceBadge(
+ expr="""{"type":"usd","usd":0.4}""",
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model: str,
+ meshy_task_id: str,
+ enable_pbr: bool,
+ texture_prompt: str,
+ texture_image: Input.Image | None = None,
+ ) -> IO.NodeOutput:
+ if texture_prompt and texture_image is not None:
+ raise ValueError("texture_prompt and texture_image cannot be used at the same time")
+ texture_image_url = None
+ if texture_prompt:
+ validate_string(texture_prompt, field_name="texture_prompt", max_length=600)
+ if texture_image is not None:
+ texture_image_url = (await upload_images_to_comfyapi(cls, texture_image, wait_label="Uploading texture"))[0]
+ response = await sync_op(
+ cls,
+ endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v2/text-to-3d", method="POST"),
+ response_model=MeshyTaskResponse,
+ data=MeshyRefineTask(
+ preview_task_id=meshy_task_id,
+ enable_pbr=enable_pbr,
+ texture_prompt=texture_prompt if texture_prompt else None,
+ texture_image_url=texture_image_url,
+ ai_model=model,
+ ),
+ )
+ result = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/meshy/openapi/v2/text-to-3d/{response.result}"),
+ response_model=MeshyModelResult,
+ status_extractor=lambda r: r.status,
+ progress_extractor=lambda r: r.progress,
+ )
+ model_file = f"meshy_model_{response.result}.glb"
+ await download_url_to_bytesio(result.model_urls.glb, os.path.join(get_output_directory(), model_file))
+ return IO.NodeOutput(model_file, response.result)
+
+
+class MeshyImageToModelNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="MeshyImageToModelNode",
+ display_name="Meshy: Image to Model",
+ category="api node/3d/Meshy",
+ inputs=[
+ IO.Combo.Input("model", options=["latest"]),
+ IO.Image.Input("image"),
+ IO.DynamicCombo.Input(
+ "should_remesh",
+ options=[
+ IO.DynamicCombo.Option(
+ "true",
+ [
+ IO.Combo.Input("topology", options=["triangle", "quad"]),
+ IO.Int.Input(
+ "target_polycount",
+ default=300000,
+ min=100,
+ max=300000,
+ display_mode=IO.NumberDisplay.number,
+ ),
+ ],
+ ),
+ IO.DynamicCombo.Option("false", []),
+ ],
+ tooltip="When set to false, returns an unprocessed triangular mesh.",
+ ),
+ IO.Combo.Input("symmetry_mode", options=["auto", "on", "off"]),
+ IO.DynamicCombo.Input(
+ "should_texture",
+ options=[
+ IO.DynamicCombo.Option(
+ "true",
+ [
+ IO.Boolean.Input(
+ "enable_pbr",
+ default=False,
+ tooltip="Generate PBR Maps (metallic, roughness, normal) "
+ "in addition to the base color.",
+ ),
+ IO.String.Input(
+ "texture_prompt",
+ default="",
+ multiline=True,
+ tooltip="Provide a text prompt to guide the texturing process. "
+ "Maximum 600 characters. Cannot be used at the same time as 'texture_image'.",
+ ),
+ IO.Image.Input(
+ "texture_image",
+ tooltip="Only one of 'texture_image' or 'texture_prompt' "
+ "may be used at the same time.",
+ optional=True,
+ ),
+ ],
+ ),
+ IO.DynamicCombo.Option("false", []),
+ ],
+ tooltip="Determines whether textures are generated. "
+ "Setting it to false skips the texture phase and returns a mesh without textures.",
+ ),
+ IO.Combo.Input(
+ "pose_mode",
+ options=["", "A-pose", "T-pose"],
+ tooltip="Specify the pose mode for the generated model.",
+ ),
+ IO.Int.Input(
+ "seed",
+ default=0,
+ min=0,
+ max=2147483647,
+ display_mode=IO.NumberDisplay.number,
+ control_after_generate=True,
+ tooltip="Seed controls whether the node should re-run; "
+ "results are non-deterministic regardless of seed.",
+ ),
+ ],
+ outputs=[
+ IO.String.Output(display_name="model_file"),
+ IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ is_output_node=True,
+ price_badge=IO.PriceBadge(
+ depends_on=IO.PriceBadgeDepends(widgets=["should_texture"]),
+ expr="""
+ (
+ $prices := {"true": 1.2, "false": 0.8};
+ {"type":"usd","usd": $lookup($prices, widgets.should_texture)}
+ )
+ """,
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model: str,
+ image: Input.Image,
+ should_remesh: InputShouldRemesh,
+ symmetry_mode: str,
+ should_texture: InputShouldTexture,
+ pose_mode: str,
+ seed: int,
+ ) -> IO.NodeOutput:
+ texture = should_texture["should_texture"] == "true"
+ texture_image_url = texture_prompt = None
+ if texture:
+ if should_texture["texture_prompt"] and should_texture["texture_image"] is not None:
+ raise ValueError("texture_prompt and texture_image cannot be used at the same time")
+ if should_texture["texture_prompt"]:
+ validate_string(should_texture["texture_prompt"], field_name="texture_prompt", max_length=600)
+ texture_prompt = should_texture["texture_prompt"]
+ if should_texture["texture_image"] is not None:
+ texture_image_url = (
+ await upload_images_to_comfyapi(
+ cls, should_texture["texture_image"], wait_label="Uploading texture"
+ )
+ )[0]
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/meshy/openapi/v1/image-to-3d", method="POST"),
+ response_model=MeshyTaskResponse,
+ data=MeshyImageToModelRequest(
+ image_url=(await upload_images_to_comfyapi(cls, image, wait_label="Uploading base image"))[0],
+ ai_model=model,
+ topology=should_remesh.get("topology", None),
+ target_polycount=should_remesh.get("target_polycount", None),
+ symmetry_mode=symmetry_mode,
+ should_remesh=should_remesh["should_remesh"] == "true",
+ should_texture=texture,
+ enable_pbr=should_texture.get("enable_pbr", None),
+ pose_mode=pose_mode.lower(),
+ texture_prompt=texture_prompt,
+ texture_image_url=texture_image_url,
+ seed=seed,
+ ),
+ )
+ result = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/meshy/openapi/v1/image-to-3d/{response.result}"),
+ response_model=MeshyModelResult,
+ status_extractor=lambda r: r.status,
+ progress_extractor=lambda r: r.progress,
+ )
+ model_file = f"meshy_model_{response.result}.glb"
+ await download_url_to_bytesio(result.model_urls.glb, os.path.join(get_output_directory(), model_file))
+ return IO.NodeOutput(model_file, response.result)
+
+
+class MeshyMultiImageToModelNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="MeshyMultiImageToModelNode",
+ display_name="Meshy: Multi-Image to Model",
+ category="api node/3d/Meshy",
+ inputs=[
+ IO.Combo.Input("model", options=["latest"]),
+ IO.Autogrow.Input(
+ "images",
+ template=IO.Autogrow.TemplatePrefix(IO.Image.Input("image"), prefix="image", min=2, max=4),
+ ),
+ IO.DynamicCombo.Input(
+ "should_remesh",
+ options=[
+ IO.DynamicCombo.Option(
+ "true",
+ [
+ IO.Combo.Input("topology", options=["triangle", "quad"]),
+ IO.Int.Input(
+ "target_polycount",
+ default=300000,
+ min=100,
+ max=300000,
+ display_mode=IO.NumberDisplay.number,
+ ),
+ ],
+ ),
+ IO.DynamicCombo.Option("false", []),
+ ],
+ tooltip="When set to false, returns an unprocessed triangular mesh.",
+ ),
+ IO.Combo.Input("symmetry_mode", options=["auto", "on", "off"]),
+ IO.DynamicCombo.Input(
+ "should_texture",
+ options=[
+ IO.DynamicCombo.Option(
+ "true",
+ [
+ IO.Boolean.Input(
+ "enable_pbr",
+ default=False,
+ tooltip="Generate PBR Maps (metallic, roughness, normal) "
+ "in addition to the base color.",
+ ),
+ IO.String.Input(
+ "texture_prompt",
+ default="",
+ multiline=True,
+ tooltip="Provide a text prompt to guide the texturing process. "
+ "Maximum 600 characters. Cannot be used at the same time as 'texture_image'.",
+ ),
+ IO.Image.Input(
+ "texture_image",
+ tooltip="Only one of 'texture_image' or 'texture_prompt' "
+ "may be used at the same time.",
+ optional=True,
+ ),
+ ],
+ ),
+ IO.DynamicCombo.Option("false", []),
+ ],
+ tooltip="Determines whether textures are generated. "
+ "Setting it to false skips the texture phase and returns a mesh without textures.",
+ ),
+ IO.Combo.Input(
+ "pose_mode",
+ options=["", "A-pose", "T-pose"],
+ tooltip="Specify the pose mode for the generated model.",
+ ),
+ IO.Int.Input(
+ "seed",
+ default=0,
+ min=0,
+ max=2147483647,
+ display_mode=IO.NumberDisplay.number,
+ control_after_generate=True,
+ tooltip="Seed controls whether the node should re-run; "
+ "results are non-deterministic regardless of seed.",
+ ),
+ ],
+ outputs=[
+ IO.String.Output(display_name="model_file"),
+ IO.Custom("MESHY_TASK_ID").Output(display_name="meshy_task_id"),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ is_output_node=True,
+ price_badge=IO.PriceBadge(
+ depends_on=IO.PriceBadgeDepends(widgets=["should_texture"]),
+ expr="""
+ (
+ $prices := {"true": 0.6, "false": 0.2};
+ {"type":"usd","usd": $lookup($prices, widgets.should_texture)}
+ )
+ """,
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model: str,
+ images: IO.Autogrow.Type,
+ should_remesh: InputShouldRemesh,
+ symmetry_mode: str,
+ should_texture: InputShouldTexture,
+ pose_mode: str,
+ seed: int,
+ ) -> IO.NodeOutput:
+ texture = should_texture["should_texture"] == "true"
+ texture_image_url = texture_prompt = None
+ if texture:
+ if should_texture["texture_prompt"] and should_texture["texture_image"] is not None:
+ raise ValueError("texture_prompt and texture_image cannot be used at the same time")
+ if should_texture["texture_prompt"]:
+ validate_string(should_texture["texture_prompt"], field_name="texture_prompt", max_length=600)
+ texture_prompt = should_texture["texture_prompt"]
+ if should_texture["texture_image"] is not None:
+ texture_image_url = (
+ await upload_images_to_comfyapi(
+ cls, should_texture["texture_image"], wait_label="Uploading texture"
+ )
+ )[0]
+ response = await sync_op(
+ cls,
+ ApiEndpoint(path="/proxy/meshy/openapi/v1/multi-image-to-3d", method="POST"),
+ response_model=MeshyTaskResponse,
+ data=MeshyMultiImageToModelRequest(
+ image_urls=await upload_images_to_comfyapi(
+ cls, list(images.values()), wait_label="Uploading base images"
+ ),
+ ai_model=model,
+ topology=should_remesh.get("topology", None),
+ target_polycount=should_remesh.get("target_polycount", None),
+ symmetry_mode=symmetry_mode,
+ should_remesh=should_remesh["should_remesh"] == "true",
+ should_texture=texture,
+ enable_pbr=should_texture.get("enable_pbr", None),
+ pose_mode=pose_mode.lower(),
+ texture_prompt=texture_prompt,
+ texture_image_url=texture_image_url,
+ seed=seed,
+ ),
+ )
+ result = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/meshy/openapi/v1/multi-image-to-3d/{response.result}"),
+ response_model=MeshyModelResult,
+ status_extractor=lambda r: r.status,
+ progress_extractor=lambda r: r.progress,
+ )
+ model_file = f"meshy_model_{response.result}.glb"
+ await download_url_to_bytesio(result.model_urls.glb, os.path.join(get_output_directory(), model_file))
+ return IO.NodeOutput(model_file, response.result)
+
+
+class MeshyRigModelNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="MeshyRigModelNode",
+ display_name="Meshy: Rig Model",
+ category="api node/3d/Meshy",
+ description="Provides a rigged character in standard formats. "
+ "Auto-rigging is currently not suitable for untextured meshes, non-humanoid assets, "
+ "or humanoid assets with unclear limb and body structure.",
+ inputs=[
+ IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
+ IO.Float.Input(
+ "height_meters",
+ min=0.1,
+ max=15.0,
+ default=1.7,
+ tooltip="The approximate height of the character model in meters. "
+ "This aids in scaling and rigging accuracy.",
+ ),
+ IO.Image.Input(
+ "texture_image",
+ tooltip="The model's UV-unwrapped base color texture image.",
+ optional=True,
+ ),
+ ],
+ outputs=[
+ IO.String.Output(display_name="model_file"),
+ IO.Custom("MESHY_RIGGED_TASK_ID").Output(display_name="rig_task_id"),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ is_output_node=True,
+ price_badge=IO.PriceBadge(
+ expr="""{"type":"usd","usd":0.2}""",
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ meshy_task_id: str,
+ height_meters: float,
+ texture_image: Input.Image | None = None,
+ ) -> IO.NodeOutput:
+ texture_image_url = None
+ if texture_image is not None:
+ texture_image_url = (await upload_images_to_comfyapi(cls, texture_image, wait_label="Uploading texture"))[0]
+ response = await sync_op(
+ cls,
+ endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v1/rigging", method="POST"),
+ response_model=MeshyTaskResponse,
+ data=MeshyRiggingRequest(
+ input_task_id=meshy_task_id,
+ height_meters=height_meters,
+ texture_image_url=texture_image_url,
+ ),
+ )
+ result = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/meshy/openapi/v1/rigging/{response.result}"),
+ response_model=MeshyRiggedResult,
+ status_extractor=lambda r: r.status,
+ progress_extractor=lambda r: r.progress,
+ )
+ model_file = f"meshy_model_{response.result}.glb"
+ await download_url_to_bytesio(
+ result.result.rigged_character_glb_url, os.path.join(get_output_directory(), model_file)
+ )
+ return IO.NodeOutput(model_file, response.result)
+
+
+class MeshyAnimateModelNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="MeshyAnimateModelNode",
+ display_name="Meshy: Animate Model",
+ category="api node/3d/Meshy",
+ description="Apply a specific animation action to a previously rigged character.",
+ inputs=[
+ IO.Custom("MESHY_RIGGED_TASK_ID").Input("rig_task_id"),
+ IO.Int.Input(
+ "action_id",
+ default=0,
+ min=0,
+ max=696,
+ tooltip="Visit https://docs.meshy.ai/en/api/animation-library for a list of available values.",
+ ),
+ ],
+ outputs=[
+ IO.String.Output(display_name="model_file"),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ is_output_node=True,
+ price_badge=IO.PriceBadge(
+ expr="""{"type":"usd","usd":0.12}""",
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ rig_task_id: str,
+ action_id: int,
+ ) -> IO.NodeOutput:
+ response = await sync_op(
+ cls,
+ endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v1/animations", method="POST"),
+ response_model=MeshyTaskResponse,
+ data=MeshyAnimationRequest(
+ rig_task_id=rig_task_id,
+ action_id=action_id,
+ ),
+ )
+ result = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/meshy/openapi/v1/animations/{response.result}"),
+ response_model=MeshyAnimationResult,
+ status_extractor=lambda r: r.status,
+ progress_extractor=lambda r: r.progress,
+ )
+ model_file = f"meshy_model_{response.result}.glb"
+ await download_url_to_bytesio(result.result.animation_glb_url, os.path.join(get_output_directory(), model_file))
+ return IO.NodeOutput(model_file, response.result)
+
+
+class MeshyTextureNode(IO.ComfyNode):
+
+ @classmethod
+ def define_schema(cls):
+ return IO.Schema(
+ node_id="MeshyTextureNode",
+ display_name="Meshy: Texture Model",
+ category="api node/3d/Meshy",
+ inputs=[
+ IO.Combo.Input("model", options=["latest"]),
+ IO.Custom("MESHY_TASK_ID").Input("meshy_task_id"),
+ IO.Boolean.Input(
+ "enable_original_uv",
+ default=True,
+ tooltip="Use the original UV of the model instead of generating new UVs. "
+ "When enabled, Meshy preserves existing textures from the uploaded model. "
+ "If the model has no original UV, the quality of the output might not be as good.",
+ ),
+ IO.Boolean.Input("pbr", default=False),
+ IO.String.Input(
+ "text_style_prompt",
+ default="",
+ multiline=True,
+ tooltip="Describe your desired texture style of the object using text. Maximum 600 characters."
+ "Maximum 600 characters. Cannot be used at the same time as 'image_style'.",
+ ),
+ IO.Image.Input(
+ "image_style",
+ optional=True,
+ tooltip="A 2d image to guide the texturing process. "
+ "Can not be used at the same time with 'text_style_prompt'.",
+ ),
+ ],
+ outputs=[
+ IO.String.Output(display_name="model_file"),
+ IO.Custom("MODEL_TASK_ID").Output(display_name="meshy_task_id"),
+ ],
+ hidden=[
+ IO.Hidden.auth_token_comfy_org,
+ IO.Hidden.api_key_comfy_org,
+ IO.Hidden.unique_id,
+ ],
+ is_api_node=True,
+ is_output_node=True,
+ price_badge=IO.PriceBadge(
+ expr="""{"type":"usd","usd":0.4}""",
+ ),
+ )
+
+ @classmethod
+ async def execute(
+ cls,
+ model: str,
+ meshy_task_id: str,
+ enable_original_uv: bool,
+ pbr: bool,
+ text_style_prompt: str,
+ image_style: Input.Image | None = None,
+ ) -> IO.NodeOutput:
+ if text_style_prompt and image_style is not None:
+ raise ValueError("text_style_prompt and image_style cannot be used at the same time")
+ if not text_style_prompt and image_style is None:
+ raise ValueError("Either text_style_prompt or image_style is required")
+ image_style_url = None
+ if image_style is not None:
+ image_style_url = (await upload_images_to_comfyapi(cls, image_style, wait_label="Uploading style"))[0]
+ response = await sync_op(
+ cls,
+ endpoint=ApiEndpoint(path="/proxy/meshy/openapi/v1/retexture", method="POST"),
+ response_model=MeshyTaskResponse,
+ data=MeshyTextureRequest(
+ input_task_id=meshy_task_id,
+ ai_model=model,
+ enable_original_uv=enable_original_uv,
+ enable_pbr=pbr,
+ text_style_prompt=text_style_prompt if text_style_prompt else None,
+ image_style_url=image_style_url,
+ ),
+ )
+ result = await poll_op(
+ cls,
+ ApiEndpoint(path=f"/proxy/meshy/openapi/v1/retexture/{response.result}"),
+ response_model=MeshyModelResult,
+ status_extractor=lambda r: r.status,
+ progress_extractor=lambda r: r.progress,
+ )
+ model_file = f"meshy_model_{response.result}.glb"
+ await download_url_to_bytesio(result.model_urls.glb, os.path.join(get_output_directory(), model_file))
+ return IO.NodeOutput(model_file, response.result)
+
+
+class MeshyExtension(ComfyExtension):
+ @override
+ async def get_node_list(self) -> list[type[IO.ComfyNode]]:
+ return [
+ MeshyTextToModelNode,
+ MeshyRefineNode,
+ MeshyImageToModelNode,
+ MeshyMultiImageToModelNode,
+ MeshyRigModelNode,
+ MeshyAnimateModelNode,
+ MeshyTextureNode,
+ ]
+
+
+async def comfy_entrypoint() -> MeshyExtension:
+ return MeshyExtension()
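Every node above follows the same three-step task lifecycle, repeated verbatim with different endpoints and result models. A condensed sketch of that shared shape (the wrapper function itself is hypothetical; the helpers are the ones imported at the top of this file):

    async def run_meshy_task(cls, submit_path, poll_prefix, request, result_model):
        # 1. create the task; Meshy returns its id in `result`
        response = await sync_op(
            cls,
            ApiEndpoint(path=submit_path, method="POST"),
            response_model=MeshyTaskResponse,
            data=request,
        )
        # 2. poll until the task reaches a terminal status, reporting progress
        result = await poll_op(
            cls,
            ApiEndpoint(path=f"{poll_prefix}/{response.result}"),
            response_model=result_model,
            status_extractor=lambda r: r.status,
            progress_extractor=lambda r: r.progress,
        )
        # 3. download the GLB into the ComfyUI output directory
        model_file = f"meshy_model_{response.result}.glb"
        await download_url_to_bytesio(
            result.model_urls.glb, os.path.join(get_output_directory(), model_file)
        )
        return IO.NodeOutput(model_file, response.result)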
diff --git a/comfy_api_nodes/util/upload_helpers.py b/comfy_api_nodes/util/upload_helpers.py
index cea0d1203..2794be35c 100644
--- a/comfy_api_nodes/util/upload_helpers.py
+++ b/comfy_api_nodes/util/upload_helpers.py
@@ -43,7 +43,7 @@ class UploadResponse(BaseModel):
async def upload_images_to_comfyapi(
cls: type[IO.ComfyNode],
- image: torch.Tensor,
+ image: torch.Tensor | list[torch.Tensor],
*,
max_images: int = 8,
mime_type: str | None = None,
@@ -55,15 +55,28 @@ async def upload_images_to_comfyapi(
Uploads images to ComfyUI API and returns download URLs.
-    To upload multiple images, stack them in the batch dimension first.
+    To upload multiple images, pass a list of tensors or stack them in the batch dimension.
"""
+ tensors: list[torch.Tensor] = []
+ if isinstance(image, list):
+ for img in image:
+ is_batch = len(img.shape) > 3
+ if is_batch:
+ tensors.extend(img[i] for i in range(img.shape[0]))
+ else:
+ tensors.append(img)
+ else:
+ is_batch = len(image.shape) > 3
+ if is_batch:
+ tensors.extend(image[i] for i in range(image.shape[0]))
+ else:
+ tensors.append(image)
+
# if batched, try to upload each file if max_images is greater than 0
download_urls: list[str] = []
- is_batch = len(image.shape) > 3
- batch_len = image.shape[0] if is_batch else 1
- num_to_upload = min(batch_len, max_images)
+ num_to_upload = min(len(tensors), max_images)
batch_start_ts = time.monotonic()
for idx in range(num_to_upload):
- tensor = image[idx] if is_batch else image
+ tensor = tensors[idx]
img_io = tensor_to_bytesio(tensor, total_pixels=total_pixels, mime_type=mime_type)
effective_label = wait_label
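With this change the helper accepts either a single tensor (optionally batched along dim 0) or a list of such tensors, flattening everything to individual (H, W, C) images before uploading. A shape-only sketch of what now normalizes correctly (illustrative; no network call is made here):

    import torch

    single = torch.rand(512, 512, 3)     # one image -> 1 upload
    batch = torch.rand(4, 512, 512, 3)   # batched along dim 0 -> 4 uploads
    mixed = [single, batch]              # list entries may themselves be batches -> 5 uploads
    # each case is capped by max_images before the upload loop runs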
diff --git a/nodes.py b/nodes.py
index aa8572446..f19d5fd1c 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2401,6 +2401,7 @@ async def init_builtin_api_nodes():
"nodes_sora.py",
"nodes_topaz.py",
"nodes_tripo.py",
+ "nodes_meshy.py",
"nodes_moonvalley.py",
"nodes_rodin.py",
"nodes_gemini.py",
From 80441eb15e807aa280fb462cbb43d14191344ba4 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Wed, 14 Jan 2026 14:53:16 -0800
Subject: [PATCH 03/10] utils: fix lanczos grayscale upscaling (#11873)
---
comfy/utils.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/comfy/utils.py b/comfy/utils.py
index fac13f128..2e33a4258 100644
--- a/comfy/utils.py
+++ b/comfy/utils.py
@@ -929,7 +929,9 @@ def bislerp(samples, width, height):
return result.to(orig_dtype)
def lanczos(samples, width, height):
- images = [Image.fromarray(np.clip(255. * image.movedim(0, -1).cpu().numpy(), 0, 255).astype(np.uint8)) for image in samples]
+ # PIL's Image.fromarray is strict and expects grayscale to be squeezed to 2-D (H, W)
+ samples = samples.squeeze(1) if samples.shape[1] == 1 else samples.movedim(1, -1)
+ images = [Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8)) for image in samples]
images = [image.resize((width, height), resample=Image.Resampling.LANCZOS) for image in images]
images = [torch.from_numpy(np.array(image).astype(np.float32) / 255.0).movedim(-1, 0) for image in images]
result = torch.stack(images)
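The fix matters because PIL's Image.fromarray maps array rank to image mode: a 2-D (H, W) uint8 array becomes mode "L", while a trailing singleton channel raises. A standalone sketch of the shape handling, assuming the usual (N, C, H, W) layout of `samples`:

    import numpy as np
    import torch
    from PIL import Image

    def to_pil_batch(samples: torch.Tensor) -> list[Image.Image]:
        # grayscale: (N, 1, H, W) -> (N, H, W); color: (N, C, H, W) -> (N, H, W, C)
        samples = samples.squeeze(1) if samples.shape[1] == 1 else samples.movedim(1, -1)
        return [
            Image.fromarray(np.clip(255. * s.cpu().numpy(), 0, 255).astype(np.uint8))
            for s in samples
        ]

    imgs = to_pil_batch(torch.rand(2, 1, 64, 64))   # two mode-"L" images, no crash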
From be518db5a7daa6010fb1c312c0832b9833a71d10 Mon Sep 17 00:00:00 2001
From: Jukka Seppänen <40791699+kijai@users.noreply.github.com>
Date: Thu, 15 Jan 2026 00:54:04 +0200
Subject: [PATCH 04/10] Remove extraneous clip missing warnings when loading
LTX2 embeddings_connector weights (#11874)
---
comfy/text_encoders/lt.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/comfy/text_encoders/lt.py b/comfy/text_encoders/lt.py
index 776e25e97..c33c77db7 100644
--- a/comfy/text_encoders/lt.py
+++ b/comfy/text_encoders/lt.py
@@ -118,8 +118,9 @@ class LTXAVTEModel(torch.nn.Module):
sdo = comfy.utils.state_dict_prefix_replace(sd, {"text_embedding_projection.aggregate_embed.weight": "text_embedding_projection.weight", "model.diffusion_model.video_embeddings_connector.": "video_embeddings_connector.", "model.diffusion_model.audio_embeddings_connector.": "audio_embeddings_connector."}, filter_keys=True)
if len(sdo) == 0:
sdo = sd
-
- return self.load_state_dict(sdo, strict=False)
+ missing, unexpected = self.load_state_dict(sdo, strict=False)
+ missing = [k for k in missing if not k.startswith("gemma3_12b.")] # filter out keys that belong to the main gemma model
+ return (missing, unexpected)
def memory_estimation_function(self, token_weight_pairs, device=None):
constant = 6.0
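For context, `load_state_dict(..., strict=False)` returns a named tuple of missing and unexpected keys rather than raising, which is what makes the prefix filter possible. A toy illustration (not the real LTXAVTEModel):

    import torch

    class TE(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.gemma3_12b = torch.nn.Linear(4, 4)   # main model, loaded elsewhere
            self.connector = torch.nn.Linear(4, 4)    # the weights this state dict provides

    sd = {"connector." + k: v for k, v in TE().connector.state_dict().items()}
    missing, unexpected = TE().load_state_dict(sd, strict=False)
    # missing == ["gemma3_12b.weight", "gemma3_12b.bias"] -> expected, so filter them out
    missing = [k for k in missing if not k.startswith("gemma3_12b.")]
    assert missing == [] and unexpected == []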
From 3b832231bb81024d80bbe31b7d7e51e07b633beb Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 15 Jan 2026 07:33:15 -0800
Subject: [PATCH 05/10] Flux2 Klein support. (#11890)
---
comfy/sd.py | 15 +++++++--
comfy/text_encoders/flux.py | 59 +++++++++++++++++++++++++++++++++++-
comfy/text_encoders/llama.py | 31 +++++++++++++++++++
3 files changed, 102 insertions(+), 3 deletions(-)
diff --git a/comfy/sd.py b/comfy/sd.py
index b689c0dfc..77700dfd3 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -1014,6 +1014,7 @@ class CLIPType(Enum):
KANDINSKY5 = 22
KANDINSKY5_IMAGE = 23
NEWBIE = 24
+ FLUX2 = 25
def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
@@ -1046,6 +1047,7 @@ class TEModel(Enum):
QWEN3_2B = 17
GEMMA_3_12B = 18
JINA_CLIP_2 = 19
+ QWEN3_8B = 20
def detect_te_model(sd):
@@ -1089,6 +1091,8 @@ def detect_te_model(sd):
return TEModel.QWEN3_4B
elif weight.shape[0] == 2048:
return TEModel.QWEN3_2B
+ elif weight.shape[0] == 4096:
+ return TEModel.QWEN3_8B
if weight.shape[0] == 5120:
if "model.layers.39.post_attention_layernorm.weight" in sd:
return TEModel.MISTRAL3_24B
@@ -1214,11 +1218,18 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
clip_target.tokenizer = comfy.text_encoders.flux.Flux2Tokenizer
tokenizer_data["tekken_model"] = clip_data[0].get("tekken_model", None)
elif te_model == TEModel.QWEN3_4B:
- clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data))
- clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer
+ if clip_type == CLIPType.FLUX or clip_type == CLIPType.FLUX2:
+ clip_target.clip = comfy.text_encoders.flux.klein_te(**llama_detect(clip_data), model_type="qwen3_4b")
+ clip_target.tokenizer = comfy.text_encoders.flux.KleinTokenizer
+ else:
+ clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data))
+ clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer
elif te_model == TEModel.QWEN3_2B:
clip_target.clip = comfy.text_encoders.ovis.te(**llama_detect(clip_data))
clip_target.tokenizer = comfy.text_encoders.ovis.OvisTokenizer
+ elif te_model == TEModel.QWEN3_8B:
+ clip_target.clip = comfy.text_encoders.flux.klein_te(**llama_detect(clip_data), model_type="qwen3_8b")
+ clip_target.tokenizer = comfy.text_encoders.flux.KleinTokenizer8B
elif te_model == TEModel.JINA_CLIP_2:
clip_target.clip = comfy.text_encoders.jina_clip_2.JinaClip2TextModelWrapper
clip_target.tokenizer = comfy.text_encoders.jina_clip_2.JinaClip2TokenizerWrapper
diff --git a/comfy/text_encoders/flux.py b/comfy/text_encoders/flux.py
index 21d93d757..4075afca4 100644
--- a/comfy/text_encoders/flux.py
+++ b/comfy/text_encoders/flux.py
@@ -3,7 +3,7 @@ import comfy.text_encoders.t5
import comfy.text_encoders.sd3_clip
import comfy.text_encoders.llama
import comfy.model_management
-from transformers import T5TokenizerFast, LlamaTokenizerFast
+from transformers import T5TokenizerFast, LlamaTokenizerFast, Qwen2Tokenizer
import torch
import os
import json
@@ -172,3 +172,60 @@ def flux2_te(dtype_llama=None, llama_quantization_metadata=None, pruned=False):
model_options["num_layers"] = 30
super().__init__(device=device, dtype=dtype, model_options=model_options)
return Flux2TEModel_
+
+class Qwen3Tokenizer(sd1_clip.SDTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}):
+ tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
+ super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2560, embedding_key='qwen3_4b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=151643, tokenizer_data=tokenizer_data)
+
+class Qwen3Tokenizer8B(sd1_clip.SDTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}):
+ tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
+ super().__init__(tokenizer_path, pad_with_end=False, embedding_size=4096, embedding_key='qwen3_8b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=151643, tokenizer_data=tokenizer_data)
+
+class KleinTokenizer(sd1_clip.SD1Tokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}, name="qwen3_4b"):
+ if name == "qwen3_4b":
+ tokenizer = Qwen3Tokenizer
+ elif name == "qwen3_8b":
+ tokenizer = Qwen3Tokenizer8B
+
+ super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name=name, tokenizer=tokenizer)
+ self.llama_template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n\n\n\n\n"
+
+ def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs):
+ if llama_template is None:
+ llama_text = self.llama_template.format(text)
+ else:
+ llama_text = llama_template.format(text)
+
+ tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs)
+ return tokens
+
+class KleinTokenizer8B(KleinTokenizer):
+ def __init__(self, embedding_directory=None, tokenizer_data={}, name="qwen3_8b"):
+ super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name=name)
+
+class Qwen3_4BModel(sd1_clip.SDClipModel):
+ def __init__(self, device="cpu", layer=[9, 18, 27], layer_idx=None, dtype=None, attention_mask=True, model_options={}):
+ super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+class Qwen3_8BModel(sd1_clip.SDClipModel):
+ def __init__(self, device="cpu", layer=[9, 18, 27], layer_idx=None, dtype=None, attention_mask=True, model_options={}):
+ super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_8B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+def klein_te(dtype_llama=None, llama_quantization_metadata=None, model_type="qwen3_4b"):
+ if model_type == "qwen3_4b":
+ model = Qwen3_4BModel
+ elif model_type == "qwen3_8b":
+ model = Qwen3_8BModel
+
+ class Flux2TEModel_(Flux2TEModel):
+ def __init__(self, device="cpu", dtype=None, model_options={}):
+ if llama_quantization_metadata is not None:
+ model_options = model_options.copy()
+ model_options["quantization_metadata"] = llama_quantization_metadata
+ if dtype_llama is not None:
+ dtype = dtype_llama
+ super().__init__(device=device, dtype=dtype, name=model_type, model_options=model_options, clip_model=model)
+ return Flux2TEModel_
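KleinTokenizer wraps the user prompt in a Qwen-style chat template before tokenizing (with weights disabled). What the encoder actually sees for a given prompt, using the template defined above:

    template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n\n\n\n\n"
    print(template.format("a red fox in the snow"))
    # <|im_start|>user
    # a red fox in the snow
    # <|im_end|>
    # <|im_start|>assistant
    # (four blank lines follow, consumed as part of the prompt)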
diff --git a/comfy/text_encoders/llama.py b/comfy/text_encoders/llama.py
index 76731576b..331a30f61 100644
--- a/comfy/text_encoders/llama.py
+++ b/comfy/text_encoders/llama.py
@@ -99,6 +99,28 @@ class Qwen3_4BConfig:
rope_scale = None
final_norm: bool = True
+@dataclass
+class Qwen3_8BConfig:
+ vocab_size: int = 151936
+ hidden_size: int = 4096
+ intermediate_size: int = 12288
+ num_hidden_layers: int = 36
+ num_attention_heads: int = 32
+ num_key_value_heads: int = 8
+ max_position_embeddings: int = 40960
+ rms_norm_eps: float = 1e-6
+ rope_theta: float = 1000000.0
+ transformer_type: str = "llama"
+ head_dim = 128
+ rms_norm_add = False
+ mlp_activation = "silu"
+ qkv_bias = False
+ rope_dims = None
+ q_norm = "gemma3"
+ k_norm = "gemma3"
+ rope_scale = None
+ final_norm: bool = True
+
@dataclass
class Ovis25_2BConfig:
vocab_size: int = 151936
@@ -628,6 +650,15 @@ class Qwen3_4B(BaseLlama, torch.nn.Module):
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
+class Qwen3_8B(BaseLlama, torch.nn.Module):
+ def __init__(self, config_dict, dtype, device, operations):
+ super().__init__()
+ config = Qwen3_8BConfig(**config_dict)
+ self.num_layers = config.num_hidden_layers
+
+ self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
+ self.dtype = dtype
+
class Ovis25_2B(BaseLlama, torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
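In `detect_te_model` (sd.py hunk above), the three Qwen3 variants are told apart purely by hidden size: 2048 -> QWEN3_2B, 2560 -> QWEN3_4B (matching the tokenizer's embedding_size), and the new 4096 -> QWEN3_8B (matching Qwen3_8BConfig.hidden_size). A toy sketch of that dispatch; the state-dict key is an assumption for illustration, since the real function selects its probe tensor earlier:

    QWEN3_BY_HIDDEN = {2048: "QWEN3_2B", 2560: "QWEN3_4B", 4096: "QWEN3_8B"}

    def detect_qwen3_variant(sd):
        weight = sd["model.norm.weight"]   # hypothetical probe; shape[0] equals hidden_size
        return QWEN3_BY_HIDDEN.get(int(weight.shape[0]))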
From 8f40b43e0204d5b9780f3e9618e140e929e80594 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 15 Jan 2026 10:57:35 -0500
Subject: [PATCH 06/10] ComfyUI v0.9.2
---
comfyui_version.py | 2 +-
pyproject.toml | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/comfyui_version.py b/comfyui_version.py
index 0c9871e35..dbb57b4e5 100644
--- a/comfyui_version.py
+++ b/comfyui_version.py
@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
-__version__ = "0.9.1"
+__version__ = "0.9.2"
diff --git a/pyproject.toml b/pyproject.toml
index dc52218b4..9ea73da05 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
-version = "0.9.1"
+version = "0.9.2"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"
From 12918a5f789d11c7d3c9d9f732891337740fe96f Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Fri, 16 Jan 2026 03:08:21 +0800
Subject: [PATCH 07/10] chore: update workflow templates to v0.8.7 (#11896)
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 8650d28ec..624cd067b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
comfyui-frontend-package==1.36.14
-comfyui-workflow-templates==0.8.4
+comfyui-workflow-templates==0.8.7
comfyui-embedded-docs==0.4.0
torch
torchsde
From 6125b3a5e7215bf01874e402525552a7f5657a41 Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Fri, 16 Jan 2026 05:12:13 +0800
Subject: [PATCH 08/10] Update workflow templates to v0.8.10 (#11899)
* chore: update workflow templates to v0.8.9
* Update requirements.txt
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 624cd067b..996701550 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
comfyui-frontend-package==1.36.14
-comfyui-workflow-templates==0.8.7
+comfyui-workflow-templates==0.8.10
comfyui-embedded-docs==0.4.0
torch
torchsde
From 4c816d5c698dafaa31f8fc2c08ab1d81f9bc3239 Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Thu, 15 Jan 2026 17:06:40 -0800
Subject: [PATCH 09/10] Adjust memory usage factor calculation for flux2 klein.
(#11900)
---
comfy/supported_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index 2c4c6b8fc..c8a7f6efb 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -763,7 +763,7 @@ class Flux2(Flux):
def __init__(self, unet_config):
super().__init__(unet_config)
- self.memory_usage_factor = self.memory_usage_factor * (2.0 * 2.0) * 2.36
+ self.memory_usage_factor = self.memory_usage_factor * (2.0 * 2.0) * (unet_config['hidden_size'] / 2604)
def get_model(self, state_dict, prefix="", device=None):
out = model_base.Flux2(self, device=device)
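The new expression agrees with the old constant on the full-size model if its hidden_size is 6144 (an assumption the arithmetic supports, since 6144 / 2604 ≈ 2.36); smaller Klein variants now scale the estimate down proportionally instead of inheriting the full-size factor:

    ratio = 6144 / 2604          # ≈ 2.3594, essentially the old hardcoded 2.36
    factor = 2.0 * 2.0 * ratio   # ≈ 9.438, the full-size memory usage factor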
From 732b707397922dbbec5ed04ecca3c773c878c64e Mon Sep 17 00:00:00 2001
From: Jedrzej Kosinski
Date: Thu, 15 Jan 2026 20:15:15 -0800
Subject: [PATCH 10/10] Added try-except around seed_assets call in
get_object_info with a logging statement (#11901)
---
server.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/server.py b/server.py
index da2baefd4..04a577488 100644
--- a/server.py
+++ b/server.py
@@ -686,7 +686,10 @@ class PromptServer():
@routes.get("/object_info")
async def get_object_info(request):
- seed_assets(["models"])
+ try:
+ seed_assets(["models"])
+ except Exception as e:
+ logging.error(f"Failed to seed assets: {e}")
with folder_paths.cache_helper:
out = {}
for x in nodes.NODE_CLASS_MAPPINGS: