Merge branch 'master' into flipflop-stream

Jedrzej Kosinski 2025-10-03 14:32:56 -07:00
commit 0fdd327c2f
15 changed files with 559 additions and 446 deletions


@@ -21,3 +21,28 @@ jobs:
      - name: Run Ruff
        run: ruff check .
+
+  pylint:
+    name: Run Pylint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.12'
+      - name: Install requirements
+        run: |
+          python -m pip install --upgrade pip
+          pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
+          pip install -r requirements.txt
+      - name: Install Pylint
+        run: pip install pylint
+      - name: Run Pylint
+        run: pylint comfy_api_nodes
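The new job mirrors the existing Ruff job. A rough way to reproduce both lint steps locally before pushing (a sketch that assumes ruff and pylint are already installed and that it is run from the repository root):

    import subprocess
    import sys

    # Same commands the two CI jobs run, minus the dependency installation steps.
    checks = [
        ["ruff", "check", "."],
        ["pylint", "comfy_api_nodes"],
    ]

    for cmd in checks:
        print("running:", " ".join(cmd))
        if subprocess.run(cmd, check=False).returncode != 0:
            sys.exit(1)  # fail fast, like a failing CI step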


@@ -909,7 +909,7 @@ class MotionEncoder_tc(nn.Module):
    def __init__(self,
                 in_dim: int,
                 hidden_dim: int,
-                 num_heads=int,
+                 num_heads: int,
                 need_global=True,
                 dtype=None,
                 device=None,


@@ -2,6 +2,7 @@
# filename: filtered-openapi.yaml
# timestamp: 2025-07-30T08:54:00+00:00
+# pylint: disable

from __future__ import annotations

from datetime import date, datetime
@@ -1320,6 +1321,7 @@ class KlingTextToVideoModelName(str, Enum):
    kling_v1 = 'kling-v1'
    kling_v1_6 = 'kling-v1-6'
    kling_v2_1_master = 'kling-v2-1-master'
+    kling_v2_5_turbo = 'kling-v2-5-turbo'


class KlingVideoGenAspectRatio(str, Enum):
@@ -1354,6 +1356,7 @@ class KlingVideoGenModelName(str, Enum):
    kling_v2_master = 'kling-v2-master'
    kling_v2_1 = 'kling-v2-1'
    kling_v2_1_master = 'kling-v2-1-master'
+    kling_v2_5_turbo = 'kling-v2-5-turbo'


class KlingVideoResult(BaseModel):


@@ -535,7 +535,7 @@ class ApiClient:
                request_method="PUT",
                request_url=upload_url,
                response_status_code=e.status if hasattr(e, "status") else None,
-                response_headers=dict(e.headers) if getattr(e, "headers") else None,
+                response_headers=dict(e.headers) if hasattr(e, "headers") else None,
                response_content=None,
                error_message=f"{type(e).__name__}: {str(e)}",
            )
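The one-character fix above matters because getattr(e, "headers") with no default raises AttributeError when the attribute is missing, while hasattr simply returns False. A minimal illustration with a hypothetical exception type:

    class UploadError(Exception):
        """Hypothetical error type that has no 'headers' attribute."""

    e = UploadError("upload failed")

    print(hasattr(e, "headers"))   # False - safe to use inside a conditional expression
    try:
        getattr(e, "headers")      # no default argument, so this raises instead
    except AttributeError as err:
        print(f"AttributeError: {err}")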


@@ -52,7 +52,3 @@ class RodinResourceItem(BaseModel):

class Rodin3DDownloadResponse(BaseModel):
    list: List[RodinResourceItem] = Field(..., description="Source List")


@@ -490,7 +490,6 @@ class GeminiInputFiles(ComfyNodeABC):
        # Use base64 string directly, not the data URI
        with open(file_path, "rb") as f:
            file_content = f.read()
-        import base64
        base64_str = base64.b64encode(file_content).decode("utf-8")
        return GeminiPart(


@@ -423,6 +423,8 @@ class KlingTextToVideoNode(KlingNodeBase):
        "standard mode / 10s duration / kling-v2-master": ("std", "10", "kling-v2-master"),
        "pro mode / 5s duration / kling-v2-1-master": ("pro", "5", "kling-v2-1-master"),
        "pro mode / 10s duration / kling-v2-1-master": ("pro", "10", "kling-v2-1-master"),
+        "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"),
+        "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"),
    }

    @classmethod


@@ -1,5 +1,7 @@
from inspect import cleandoc
from typing import Optional
+from typing_extensions import override
+from io import BytesIO
from comfy_api_nodes.apis.pixverse_api import (
    PixverseTextVideoRequest,
    PixverseImageVideoRequest,
@@ -26,12 +28,11 @@ from comfy_api_nodes.apinode_utils import (
    tensor_to_bytesio,
    validate_string,
)
-from comfy.comfy_types.node_typing import IO, ComfyNodeABC
from comfy_api.input_impl import VideoFromFile
+from comfy_api.latest import ComfyExtension, io as comfy_io
import torch
import aiohttp
-from io import BytesIO

AVERAGE_DURATION_T2V = 32
@@ -72,100 +73,101 @@ async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None):
    return response_upload.Resp.img_id


-class PixverseTemplateNode:
+class PixverseTemplateNode(comfy_io.ComfyNode):
    """
    Select template for PixVerse Video generation.
    """

-    RETURN_TYPES = (PixverseIO.TEMPLATE,)
-    RETURN_NAMES = ("pixverse_template",)
-    FUNCTION = "create_template"
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseTemplateNode",
+            display_name="PixVerse Template",
+            category="api node/video/PixVerse",
+            inputs=[
+                comfy_io.Combo.Input("template", options=[list(pixverse_templates.keys())]),
+            ],
+            outputs=[comfy_io.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")],
+        )

    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "template": (list(pixverse_templates.keys()),),
-            }
-        }
-
-    def create_template(self, template: str):
+    def execute(cls, template: str) -> comfy_io.NodeOutput:
        template_id = pixverse_templates.get(template, None)
        if template_id is None:
            raise Exception(f"Template '{template}' is not recognized.")
        # just return the integer
-        return (template_id,)
+        return comfy_io.NodeOutput(template_id)


-class PixverseTextToVideoNode(ComfyNodeABC):
+class PixverseTextToVideoNode(comfy_io.ComfyNode):
    """
    Generates videos based on prompt and output_size.
    """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseTextToVideoNode",
+            display_name="PixVerse Text to Video",
+            category="api node/video/PixVerse",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "aspect_ratio",
+                    options=[ratio.value for ratio in PixverseAspectRatio],
+                ),
+                comfy_io.Combo.Input(
+                    "quality",
+                    options=[resolution.value for resolution in PixverseQuality],
+                    default=PixverseQuality.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration_seconds",
+                    options=[dur.value for dur in PixverseDuration],
+                ),
+                comfy_io.Combo.Input(
+                    "motion_mode",
+                    options=[mode.value for mode in PixverseMotionMode],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    control_after_generate=True,
+                    tooltip="Seed for video generation.",
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    default="",
+                    force_input=True,
+                    tooltip="An optional text description of undesired elements on an image.",
+                    optional=True,
+                ),
+                comfy_io.Custom(PixverseIO.TEMPLATE).Input(
+                    "pixverse_template",
+                    tooltip="An optional template to influence style of generation, created by the PixVerse Template node.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "aspect_ratio": ([ratio.value for ratio in PixverseAspectRatio],),
-                "quality": (
-                    [resolution.value for resolution in PixverseQuality],
-                    {
-                        "default": PixverseQuality.res_540p,
-                    },
-                ),
-                "duration_seconds": ([dur.value for dur in PixverseDuration],),
-                "motion_mode": ([mode.value for mode in PixverseMotionMode],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 2147483647,
-                        "control_after_generate": True,
-                        "tooltip": "Seed for video generation.",
-                    },
-                ),
-            },
-            "optional": {
-                "negative_prompt": (
-                    IO.STRING,
-                    {
-                        "default": "",
-                        "forceInput": True,
-                        "tooltip": "An optional text description of undesired elements on an image.",
-                    },
-                ),
-                "pixverse_template": (
-                    PixverseIO.TEMPLATE,
-                    {
-                        "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
        prompt: str,
        aspect_ratio: str,
        quality: str,
@@ -174,9 +176,7 @@ class PixverseTextToVideoNode(ComfyNodeABC):
        seed,
        negative_prompt: str = None,
        pixverse_template: int = None,
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
        validate_string(prompt, strip_whitespace=False)
        # 1080p is limited to 5 seconds duration
        # only normal motion_mode supported for 1080p or for non-5 second duration
@@ -186,6 +186,10 @@ class PixverseTextToVideoNode(ComfyNodeABC):
        elif duration_seconds != PixverseDuration.dur_5:
            motion_mode = PixverseMotionMode.normal

+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
        operation = SynchronousOperation(
            endpoint=ApiEndpoint(
                path="/proxy/pixverse/video/text/generate",
@@ -203,7 +207,7 @@ class PixverseTextToVideoNode(ComfyNodeABC):
                template_id=pixverse_template,
                seed=seed,
            ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
        )
        response_api = await operation.execute()
@@ -224,8 +228,8 @@ class PixverseTextToVideoNode(ComfyNodeABC):
                PixverseStatus.deleted,
            ],
            status_extractor=lambda x: x.Resp.status,
-            auth_kwargs=kwargs,
-            node_id=unique_id,
+            auth_kwargs=auth,
+            node_id=cls.hidden.unique_id,
            result_url_extractor=get_video_url_from_response,
            estimated_duration=AVERAGE_DURATION_T2V,
        )
@@ -233,77 +237,75 @@ class PixverseTextToVideoNode(ComfyNodeABC):
        async with aiohttp.ClientSession() as session:
            async with session.get(response_poll.Resp.url) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-class PixverseImageToVideoNode(ComfyNodeABC):
+class PixverseImageToVideoNode(comfy_io.ComfyNode):
    """
    Generates videos based on prompt and output_size.
    """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseImageToVideoNode",
+            display_name="PixVerse Image to Video",
+            category="api node/video/PixVerse",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input("image"),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "quality",
+                    options=[resolution.value for resolution in PixverseQuality],
+                    default=PixverseQuality.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration_seconds",
+                    options=[dur.value for dur in PixverseDuration],
+                ),
+                comfy_io.Combo.Input(
+                    "motion_mode",
+                    options=[mode.value for mode in PixverseMotionMode],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    control_after_generate=True,
+                    tooltip="Seed for video generation.",
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    default="",
+                    force_input=True,
+                    tooltip="An optional text description of undesired elements on an image.",
+                    optional=True,
+                ),
+                comfy_io.Custom(PixverseIO.TEMPLATE).Input(
+                    "pixverse_template",
+                    tooltip="An optional template to influence style of generation, created by the PixVerse Template node.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": (IO.IMAGE,),
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "quality": (
-                    [resolution.value for resolution in PixverseQuality],
-                    {
-                        "default": PixverseQuality.res_540p,
-                    },
-                ),
-                "duration_seconds": ([dur.value for dur in PixverseDuration],),
-                "motion_mode": ([mode.value for mode in PixverseMotionMode],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 2147483647,
-                        "control_after_generate": True,
-                        "tooltip": "Seed for video generation.",
-                    },
-                ),
-            },
-            "optional": {
-                "negative_prompt": (
-                    IO.STRING,
-                    {
-                        "default": "",
-                        "forceInput": True,
-                        "tooltip": "An optional text description of undesired elements on an image.",
-                    },
-                ),
-                "pixverse_template": (
-                    PixverseIO.TEMPLATE,
-                    {
-                        "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
        image: torch.Tensor,
        prompt: str,
        quality: str,
@@ -312,11 +314,13 @@ class PixverseImageToVideoNode(ComfyNodeABC):
        seed,
        negative_prompt: str = None,
        pixverse_template: int = None,
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
        validate_string(prompt, strip_whitespace=False)
-        img_id = await upload_image_to_pixverse(image, auth_kwargs=kwargs)
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
+        img_id = await upload_image_to_pixverse(image, auth_kwargs=auth)
        # 1080p is limited to 5 seconds duration
        # only normal motion_mode supported for 1080p or for non-5 second duration
@@ -343,7 +347,7 @@ class PixverseImageToVideoNode(ComfyNodeABC):
                template_id=pixverse_template,
                seed=seed,
            ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
        )
        response_api = await operation.execute()
@@ -364,8 +368,8 @@ class PixverseImageToVideoNode(ComfyNodeABC):
                PixverseStatus.deleted,
            ],
            status_extractor=lambda x: x.Resp.status,
-            auth_kwargs=kwargs,
-            node_id=unique_id,
+            auth_kwargs=auth,
+            node_id=cls.hidden.unique_id,
            result_url_extractor=get_video_url_from_response,
            estimated_duration=AVERAGE_DURATION_I2V,
        )
@@ -373,72 +377,71 @@ class PixverseImageToVideoNode(ComfyNodeABC):
        async with aiohttp.ClientSession() as session:
            async with session.get(response_poll.Resp.url) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-class PixverseTransitionVideoNode(ComfyNodeABC):
+class PixverseTransitionVideoNode(comfy_io.ComfyNode):
    """
    Generates videos based on prompt and output_size.
    """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseTransitionVideoNode",
+            display_name="PixVerse Transition Video",
+            category="api node/video/PixVerse",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input("first_frame"),
+                comfy_io.Image.Input("last_frame"),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "quality",
+                    options=[resolution.value for resolution in PixverseQuality],
+                    default=PixverseQuality.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration_seconds",
+                    options=[dur.value for dur in PixverseDuration],
+                ),
+                comfy_io.Combo.Input(
+                    "motion_mode",
+                    options=[mode.value for mode in PixverseMotionMode],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    control_after_generate=True,
+                    tooltip="Seed for video generation.",
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    default="",
+                    force_input=True,
+                    tooltip="An optional text description of undesired elements on an image.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "first_frame": (IO.IMAGE,),
-                "last_frame": (IO.IMAGE,),
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "quality": (
-                    [resolution.value for resolution in PixverseQuality],
-                    {
-                        "default": PixverseQuality.res_540p,
-                    },
-                ),
-                "duration_seconds": ([dur.value for dur in PixverseDuration],),
-                "motion_mode": ([mode.value for mode in PixverseMotionMode],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 2147483647,
-                        "control_after_generate": True,
-                        "tooltip": "Seed for video generation.",
-                    },
-                ),
-            },
-            "optional": {
-                "negative_prompt": (
-                    IO.STRING,
-                    {
-                        "default": "",
-                        "forceInput": True,
-                        "tooltip": "An optional text description of undesired elements on an image.",
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
        first_frame: torch.Tensor,
        last_frame: torch.Tensor,
        prompt: str,
@@ -447,12 +450,14 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
        motion_mode: str,
        seed,
        negative_prompt: str = None,
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
        validate_string(prompt, strip_whitespace=False)
-        first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=kwargs)
-        last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=kwargs)
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
+        first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=auth)
+        last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=auth)
        # 1080p is limited to 5 seconds duration
        # only normal motion_mode supported for 1080p or for non-5 second duration
@@ -479,7 +484,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
                negative_prompt=negative_prompt if negative_prompt else None,
                seed=seed,
            ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
        )
        response_api = await operation.execute()
@@ -500,8 +505,8 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
                PixverseStatus.deleted,
            ],
            status_extractor=lambda x: x.Resp.status,
-            auth_kwargs=kwargs,
-            node_id=unique_id,
+            auth_kwargs=auth,
+            node_id=cls.hidden.unique_id,
            result_url_extractor=get_video_url_from_response,
            estimated_duration=AVERAGE_DURATION_T2V,
        )
@@ -509,19 +514,19 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
        async with aiohttp.ClientSession() as session:
            async with session.get(response_poll.Resp.url) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-NODE_CLASS_MAPPINGS = {
-    "PixverseTextToVideoNode": PixverseTextToVideoNode,
-    "PixverseImageToVideoNode": PixverseImageToVideoNode,
-    "PixverseTransitionVideoNode": PixverseTransitionVideoNode,
-    "PixverseTemplateNode": PixverseTemplateNode,
-}
-
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "PixverseTextToVideoNode": "PixVerse Text to Video",
-    "PixverseImageToVideoNode": "PixVerse Image to Video",
-    "PixverseTransitionVideoNode": "PixVerse Transition Video",
-    "PixverseTemplateNode": "PixVerse Template",
-}
+class PixVerseExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+        return [
+            PixverseTextToVideoNode,
+            PixverseImageToVideoNode,
+            PixverseTransitionVideoNode,
+            PixverseTemplateNode,
+        ]
+
+
+async def comfy_entrypoint() -> PixVerseExtension:
+    return PixVerseExtension()
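The pattern repeated across all three nodes above is the core of this migration: hidden auth inputs are no longer threaded through **kwargs but read from cls.hidden inside execute, and results are wrapped in comfy_io.NodeOutput. A stripped-down sketch of that shape, using only constructs that appear in this diff (the node id, category, input name, and endpoint helper are placeholders, not real ComfyUI names):

    class MinimalApiNode(comfy_io.ComfyNode):
        @classmethod
        def define_schema(cls) -> comfy_io.Schema:
            return comfy_io.Schema(
                node_id="MinimalApiNode",  # placeholder id
                category="api node/example",
                inputs=[comfy_io.String.Input("prompt", multiline=True, default="")],
                outputs=[comfy_io.Video.Output()],
                hidden=[
                    comfy_io.Hidden.auth_token_comfy_org,
                    comfy_io.Hidden.api_key_comfy_org,
                    comfy_io.Hidden.unique_id,
                ],
                is_api_node=True,
            )

        @classmethod
        async def execute(cls, prompt: str) -> comfy_io.NodeOutput:
            # Credentials now come from cls.hidden instead of **kwargs hidden inputs.
            auth = {
                "auth_token": cls.hidden.auth_token_comfy_org,
                "comfy_api_key": cls.hidden.api_key_comfy_org,
            }
            result = await call_some_endpoint(prompt, auth_kwargs=auth)  # placeholder helper
            return comfy_io.NodeOutput(result)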


@@ -1,26 +1,38 @@
import node_helpers
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io

-class ReferenceLatent:
+
+class ReferenceLatent(io.ComfyNode):
    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"conditioning": ("CONDITIONING", ),
-                             },
-                "optional": {"latent": ("LATENT", ),}
-               }
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ReferenceLatent",
+            category="advanced/conditioning/edit_models",
+            description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
+            inputs=[
+                io.Conditioning.Input("conditioning"),
+                io.Latent.Input("latent", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(),
+            ]
+        )

-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "append"
-    CATEGORY = "advanced/conditioning/edit_models"
-    DESCRIPTION = "This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images."
-
-    def append(self, conditioning, latent=None):
+    @classmethod
+    def execute(cls, conditioning, latent=None) -> io.NodeOutput:
        if latent is not None:
            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [latent["samples"]]}, append=True)
-        return (conditioning, )
+        return io.NodeOutput(conditioning)

-NODE_CLASS_MAPPINGS = {
-    "ReferenceLatent": ReferenceLatent,
-}
+
+class EditModelExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            ReferenceLatent,
+        ]
+
+
+def comfy_entrypoint() -> EditModelExtension:
+    return EditModelExtension()
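The append=True flag is what lets several chained ReferenceLatent nodes accumulate reference images instead of overwriting each other. A rough illustration of that accumulation, with plain dicts standing in for the real conditioning structures handled by node_helpers.conditioning_set_values:

    def append_reference_latent(cond_options: dict, latent_samples) -> dict:
        # Mimics append=True: extend any existing list rather than replacing it.
        refs = list(cond_options.get("reference_latents", []))
        refs.append(latent_samples)
        return {**cond_options, "reference_latents": refs}

    opts = {}
    opts = append_reference_latent(opts, "latent_A")   # first ReferenceLatent node
    opts = append_reference_latent(opts, "latent_B")   # chained second node
    print(opts["reference_latents"])                   # ['latent_A', 'latent_B']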


@@ -1,4 +1,9 @@
-class EpsilonScaling:
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
+
+
+class EpsilonScaling(io.ComfyNode):
    """
    Implements the Epsilon Scaling method from 'Elucidating the Exposure Bias in Diffusion Models'
    (https://arxiv.org/abs/2308.15321v6).
@@ -8,26 +13,28 @@ class EpsilonScaling:
    recommended by the paper for its practicality and effectiveness.
    """

    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "model": ("MODEL",),
-                "scaling_factor": ("FLOAT", {
-                    "default": 1.005,
-                    "min": 0.5,
-                    "max": 1.5,
-                    "step": 0.001,
-                    "display": "number"
-                }),
-            }
-        }
+    def define_schema(cls):
+        return io.Schema(
+            node_id="Epsilon Scaling",
+            category="model_patches/unet",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input(
+                    "scaling_factor",
+                    default=1.005,
+                    min=0.5,
+                    max=1.5,
+                    step=0.001,
+                    display_mode=io.NumberDisplay.number,
+                ),
+            ],
+            outputs=[
+                io.Model.Output(),
+            ],
+        )

-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
-    CATEGORY = "model_patches/unet"
-
-    def patch(self, model, scaling_factor):
+    @classmethod
+    def execute(cls, model, scaling_factor) -> io.NodeOutput:
        # Prevent division by zero, though the UI's min value should prevent this.
        if scaling_factor == 0:
            scaling_factor = 1e-9
@@ -53,8 +60,15 @@ class EpsilonScaling:
        model_clone.set_model_sampler_post_cfg_function(epsilon_scaling_function)
-        return (model_clone,)
+        return io.NodeOutput(model_clone)

-NODE_CLASS_MAPPINGS = {
-    "Epsilon Scaling": EpsilonScaling
-}
+
+class EpsilonScalingExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            EpsilonScaling,
+        ]
+
+
+async def comfy_entrypoint() -> EpsilonScalingExtension:
+    return EpsilonScalingExtension()
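The epsilon_scaling_function registered via set_model_sampler_post_cfg_function lies outside this hunk; conceptually, the method divides the predicted noise by a factor slightly above 1 before the sampler reconstructs the next latent. A bare-bones sketch of just that scaling step (an illustration of the idea, not the node's actual sampler hook):

    import torch

    def scale_epsilon(noise_pred: torch.Tensor, scaling_factor: float) -> torch.Tensor:
        # Core idea of the paper: shrink the predicted noise at sampling time
        # to counteract the exposure bias accumulated over denoising steps.
        return noise_pred / scaling_factor

    eps = torch.randn(1, 4, 64, 64)                        # stand-in for a model's noise prediction
    eps_scaled = scale_epsilon(eps, scaling_factor=1.005)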


@@ -1,7 +1,9 @@
#Taken from: https://github.com/dbolya/tomesd

import torch
-from typing import Tuple, Callable
+from typing import Tuple, Callable, Optional
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
import math

def do_nothing(x: torch.Tensor, mode:str=None):
@@ -144,33 +146,45 @@ def get_functions(x, ratio, original_shape):

-class TomePatchModel:
+class TomePatchModel(io.ComfyNode):
    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "model": ("MODEL",),
-                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
-                              }}
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TomePatchModel",
+            category="model_patches/unet",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input("ratio", default=0.3, min=0.0, max=1.0, step=0.01),
+            ],
+            outputs=[io.Model.Output()],
+        )

-    CATEGORY = "model_patches/unet"
-
-    def patch(self, model, ratio):
-        self.u = None
+    @classmethod
+    def execute(cls, model, ratio) -> io.NodeOutput:
+        u: Optional[Callable] = None
+
        def tomesd_m(q, k, v, extra_options):
+            nonlocal u
            #NOTE: In the reference code get_functions takes x (input of the transformer block) as the argument instead of q
            #however from my basic testing it seems that using q instead gives better results
-            m, self.u = get_functions(q, ratio, extra_options["original_shape"])
+            m, u = get_functions(q, ratio, extra_options["original_shape"])
            return m(q), k, v
+
        def tomesd_u(n, extra_options):
-            return self.u(n)
+            nonlocal u
+            return u(n)

        m = model.clone()
        m.set_model_attn1_patch(tomesd_m)
        m.set_model_attn1_output_patch(tomesd_u)
-        return (m, )
+        return io.NodeOutput(m)

-NODE_CLASS_MAPPINGS = {
-    "TomePatchModel": TomePatchModel,
-}
+
+class TomePatchModelExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            TomePatchModel,
+        ]
+
+
+async def comfy_entrypoint() -> TomePatchModelExtension:
+    return TomePatchModelExtension()
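The rewrite replaces the old self.u attribute, which stored mutable state on the instance, with a closure variable shared by the two patch callbacks (execute is now a classmethod, so there is no instance to hang state on). A minimal standalone sketch of that nonlocal pattern:

    from typing import Callable, Optional

    def make_patch_pair():
        u: Optional[Callable] = None        # shared by the two inner functions

        def producer(value):
            nonlocal u                      # rebind the enclosing variable, not a new local
            u = lambda n: n + value
            return value

        def consumer(n):
            nonlocal u
            return u(n)                     # uses whatever producer stored last

        return producer, consumer

    producer, consumer = make_patch_pair()
    producer(10)
    print(consumer(5))                      # 15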


@@ -1,23 +1,39 @@
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
from comfy_api.torch_helpers import set_torch_compile_wrapper

-class TorchCompileModel:
+
+class TorchCompileModel(io.ComfyNode):
    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "model": ("MODEL",),
-                              "backend": (["inductor", "cudagraphs"],),
-                              }}
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="TorchCompileModel",
+            category="_for_testing",
+            inputs=[
+                io.Model.Input("model"),
+                io.Combo.Input(
+                    "backend",
+                    options=["inductor", "cudagraphs"],
+                ),
+            ],
+            outputs=[io.Model.Output()],
+            is_experimental=True,
+        )

-    CATEGORY = "_for_testing"
-    EXPERIMENTAL = True
-
-    def patch(self, model, backend):
+    @classmethod
+    def execute(cls, model, backend) -> io.NodeOutput:
        m = model.clone()
        set_torch_compile_wrapper(model=m, backend=backend)
-        return (m, )
+        return io.NodeOutput(m)

-NODE_CLASS_MAPPINGS = {
-    "TorchCompileModel": TorchCompileModel,
-}
+
+class TorchCompileExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            TorchCompileModel,
+        ]
+
+
+async def comfy_entrypoint() -> TorchCompileExtension:
+    return TorchCompileExtension()
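For context on the two options the node exposes: torch.compile takes a backend argument, where "inductor" is the default code-generating backend and "cudagraphs" captures and replays CUDA graphs. A rough standalone illustration, independent of the node's set_torch_compile_wrapper helper:

    import torch

    def f(x: torch.Tensor) -> torch.Tensor:
        return torch.sin(x) * torch.cos(x)

    # "inductor" is the default; "cudagraphs" is the other choice the node offers.
    compiled_f = torch.compile(f, backend="inductor")
    print(compiled_f(torch.randn(8)))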


@@ -1,96 +1,70 @@
-class Example:
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
+
+
+class Example(io.ComfyNode):
    """
-    A example node
+    An example node

    Class methods
    -------------
-    INPUT_TYPES (dict):
-        Tell the main program input parameters of nodes.
-    IS_CHANGED:
+    define_schema (io.Schema):
+        Tell the main program the metadata, input, output parameters of nodes.
+    fingerprint_inputs:
        optional method to control when the node is re executed.
-
-    Attributes
-    ----------
-    RETURN_TYPES (`tuple`):
-        The type of each element in the output tuple.
-    RETURN_NAMES (`tuple`):
-        Optional: The name of each output in the output tuple.
-    FUNCTION (`str`):
-        The name of the entry-point method. For example, if `FUNCTION = "execute"` then it will run Example().execute()
-    OUTPUT_NODE ([`bool`]):
-        If this node is an output node that outputs a result/image from the graph. The SaveImage node is an example.
-        The backend iterates on these output nodes and tries to execute all their parents if their parent graph is properly connected.
-        Assumed to be False if not present.
-    CATEGORY (`str`):
-        The category the node should appear in the UI.
-    DEPRECATED (`bool`):
-        Indicates whether the node is deprecated. Deprecated nodes are hidden by default in the UI, but remain
-        functional in existing workflows that use them.
-    EXPERIMENTAL (`bool`):
-        Indicates whether the node is experimental. Experimental nodes are marked as such in the UI and may be subject to
-        significant changes or removal in future versions. Use with caution in production workflows.
-    execute(s) -> tuple || None:
-        The entry point method. The name of this method must be the same as the value of property `FUNCTION`.
-        For example, if `FUNCTION = "execute"` then this method's name must be `execute`, if `FUNCTION = "foo"` then it must be `foo`.
+    check_lazy_status:
+        optional method to control list of input names that need to be evaluated.
    """
-    def __init__(self):
-        pass

    @classmethod
-    def INPUT_TYPES(s):
+    def define_schema(cls) -> io.Schema:
        """
-        Return a dictionary which contains config for all input fields.
-        Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT".
-        Input types "INT", "STRING" or "FLOAT" are special values for fields on the node.
-        The type can be a list for selection.
-
-        Returns: `dict`:
-            - Key input_fields_group (`string`): Can be either required, hidden or optional. A node class must have property `required`
-            - Value input_fields (`dict`): Contains input fields config:
-                * Key field_name (`string`): Name of a entry-point method's argument
-                * Value field_config (`tuple`):
-                    + First value is a string indicate the type of field or a list for selection.
-                    + Second value is a config for type "INT", "STRING" or "FLOAT".
+        Return a schema which contains all information about the node.
+        Some types: "Model", "Vae", "Clip", "Conditioning", "Latent", "Image", "Int", "String", "Float", "Combo".
+        For outputs the "io.Model.Output" should be used, for inputs the "io.Model.Input" can be used.
+        The type can be a "Combo" - this will be a list for selection.
        """
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "int_field": ("INT", {
-                    "default": 0,
-                    "min": 0, #Minimum value
-                    "max": 4096, #Maximum value
-                    "step": 64, #Slider's step
-                    "display": "number", # Cosmetic only: display as "number" or "slider"
-                    "lazy": True # Will only be evaluated if check_lazy_status requires it
-                }),
-                "float_field": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.0,
-                    "max": 10.0,
-                    "step": 0.01,
-                    "round": 0.001, #The value representing the precision to round to, will be set to the step value by default. Can be set to False to disable rounding.
-                    "display": "number",
-                    "lazy": True
-                }),
-                "print_to_screen": (["enable", "disable"],),
-                "string_field": ("STRING", {
-                    "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node
-                    "default": "Hello World!",
-                    "lazy": True
-                }),
-            },
-        }
+        return io.Schema(
+            node_id="Example",
+            display_name="Example Node",
+            category="Example",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input(
+                    "int_field",
+                    min=0,
+                    max=4096,
+                    step=64, # Slider's step
+                    display_mode=io.NumberDisplay.number, # Cosmetic only: display as "number" or "slider"
+                    lazy=True, # Will only be evaluated if check_lazy_status requires it
+                ),
+                io.Float.Input(
+                    "float_field",
+                    default=1.0,
+                    min=0.0,
+                    max=10.0,
+                    step=0.01,
+                    round=0.001, #The value representing the precision to round to, will be set to the step value by default. Can be set to False to disable rounding.
+                    display_mode=io.NumberDisplay.number,
+                    lazy=True,
+                ),
+                io.Combo.Input("print_to_screen", options=["enable", "disable"]),
+                io.String.Input(
+                    "string_field",
+                    multiline=False, # True if you want the field to look like the one on the ClipTextEncode node
+                    default="Hello world!",
+                    lazy=True,
+                )
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

-    RETURN_TYPES = ("IMAGE",)
-    #RETURN_NAMES = ("image_output_name",)
-    FUNCTION = "test"
-    #OUTPUT_NODE = False
-    CATEGORY = "Example"
-
-    def check_lazy_status(self, image, string_field, int_field, float_field, print_to_screen):
+    @classmethod
+    def check_lazy_status(cls, image, string_field, int_field, float_field, print_to_screen):
        """
        Return a list of input names that need to be evaluated.
@@ -107,7 +81,8 @@ class Example:
        else:
            return []

-    def test(self, image, string_field, int_field, float_field, print_to_screen):
+    @classmethod
+    def execute(cls, image, string_field, int_field, float_field, print_to_screen) -> io.NodeOutput:
        if print_to_screen == "enable":
            print(f"""Your input contains:
                string_field aka input text: {string_field}
@@ -116,7 +91,7 @@ class Example:
            """)
        #do some processing on the image, in this example I just invert it
        image = 1.0 - image
-        return (image,)
+        return io.NodeOutput(image)

    """
        The node will always be re executed if any of the inputs change but
@@ -127,7 +102,7 @@ class Example:
        changes between executions the LoadImage node is executed again.
    """
    #@classmethod
-    #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen):
+    #def fingerprint_inputs(s, image, string_field, int_field, float_field, print_to_screen):
    #    return ""

# Set the web directory, any .js file in that directory will be loaded by the frontend as a frontend extension
@@ -143,13 +118,13 @@ async def get_hello(request):
    return web.json_response("hello")

-# A dictionary that contains all nodes you want to export with their names
-# NOTE: names should be globally unique
-NODE_CLASS_MAPPINGS = {
-    "Example": Example
-}
-
-# A dictionary that contains the friendly/humanly readable titles for the nodes
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "Example": "Example Node"
-}
+class ExampleExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            Example,
+        ]
+
+
+async def comfy_entrypoint() -> ExampleExtension:  # ComfyUI calls this to load your extension and its nodes.
+    return ExampleExtension()
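As the commented-out stub above suggests, fingerprint_inputs takes over the role of IS_CHANGED: its return value is compared between runs, and a change forces the node to execute again. A hedged sketch of one common pattern, hashing a file so on-disk edits trigger a re-run (the FileNode class and its file_path input are hypothetical, not part of the Example node):

    import hashlib
    from comfy_api.latest import io

    class FileNode(io.ComfyNode):
        # define_schema / execute omitted for brevity

        @classmethod
        def fingerprint_inputs(cls, file_path):
            # A different digest on the next run means the node is re-executed.
            digest = hashlib.sha256()
            with open(file_path, "rb") as f:
                digest.update(f.read())
            return digest.hexdigest()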


@@ -22,3 +22,55 @@ lint.select = [
    "F",
]
exclude = ["*.ipynb", "**/generated/*.pyi"]
+
+[tool.pylint]
+master.py-version = "3.9"
+master.extension-pkg-allow-list = [
+    "pydantic",
+]
+reports.output-format = "colorized"
+similarities.ignore-imports = "yes"
+messages_control.disable = [
+    "missing-module-docstring",
+    "missing-class-docstring",
+    "missing-function-docstring",
+    "line-too-long",
+    "too-few-public-methods",
+    "too-many-public-methods",
+    "too-many-instance-attributes",
+    "too-many-positional-arguments",
+    "broad-exception-raised",
+    "too-many-lines",
+    "invalid-name",
+    "unused-argument",
+    "broad-exception-caught",
+    "consider-using-with",
+    "fixme",
+    "too-many-statements",
+    "too-many-branches",
+    "too-many-locals",
+    "too-many-arguments",
+    "duplicate-code",
+    "abstract-method",
+    "superfluous-parens",
+    "arguments-differ",
+    "redefined-builtin",
+    "unnecessary-lambda",
+    "dangerous-default-value",
+    # next warnings should be fixed in future
+    "bad-classmethod-argument",  # Class method should have 'cls' as first argument
+    "wrong-import-order",  # Standard imports should be placed before third party imports
+    "logging-fstring-interpolation",  # Use lazy % formatting in logging functions
+    "ungrouped-imports",
+    "unnecessary-pass",
+    "unidiomatic-typecheck",
+    "unnecessary-lambda-assignment",
+    "no-else-return",
+    "no-else-raise",
+    "invalid-overridden-method",
+    "unused-variable",
+    "pointless-string-statement",
+    "inconsistent-return-statements",
+    "import-outside-toplevel",
+    "redefined-outer-name",
+]
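Most of the checks above are turned off globally; the same thing can be done per file or per line, which is what the bare "# pylint: disable" added to the generated API module earlier in this commit does at file scope. A small hypothetical module showing both suppression scopes:

    # pylint: disable=invalid-name
    # The directive above applies to this whole file; the trailing form below silences one line.

    imageWidth = 1024  # camelCase would normally trip invalid-name at module level

    def poll(result=None):  # pylint: disable=unused-argument
        return True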