Merge branch 'master' into dr-support-pip-cm

Dr.Lt.Data 2025-10-04 07:09:43 +09:00
commit 8634b19bc7
11 changed files with 412 additions and 350 deletions

View File

@@ -903,7 +903,7 @@ class MotionEncoder_tc(nn.Module):
     def __init__(self,
                  in_dim: int,
                  hidden_dim: int,
-                 num_heads=int,
+                 num_heads: int,
                  need_global=True,
                  dtype=None,
                  device=None,
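Note on the hunk above: `num_heads=int` gives the parameter the built-in `int` class as a default value, while `num_heads: int` is a plain type annotation that makes the argument required. A standalone sketch of the difference (illustrative names, not from this commit):

def with_typo(num_heads=int):
    # If the caller omits num_heads, the function receives the class `int` itself.
    return num_heads

def fixed(num_heads: int):
    # The argument is now required and simply annotated as an int.
    return num_heads

print(with_typo())  # <class 'int'>
print(fixed(8))     # 8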

View File

@@ -1321,6 +1321,7 @@ class KlingTextToVideoModelName(str, Enum):
     kling_v1 = 'kling-v1'
     kling_v1_6 = 'kling-v1-6'
     kling_v2_1_master = 'kling-v2-1-master'
+    kling_v2_5_turbo = 'kling-v2-5-turbo'


 class KlingVideoGenAspectRatio(str, Enum):
@@ -1355,6 +1356,7 @@ class KlingVideoGenModelName(str, Enum):
     kling_v2_master = 'kling-v2-master'
     kling_v2_1 = 'kling-v2-1'
     kling_v2_1_master = 'kling-v2-1-master'
+    kling_v2_5_turbo = 'kling-v2-5-turbo'


 class KlingVideoResult(BaseModel):

View File

@@ -490,7 +490,6 @@ class GeminiInputFiles(ComfyNodeABC):
         # Use base64 string directly, not the data URI
         with open(file_path, "rb") as f:
             file_content = f.read()
-        import base64
         base64_str = base64.b64encode(file_content).decode("utf-8")

         return GeminiPart(
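Dropping the function-local `import base64` only works because `base64` is presumably already imported at module level; the encoding call itself is unchanged. A self-contained sketch of that call (the byte string is a stand-in for the file contents read above):

import base64

file_content = b"example bytes"  # stand-in for f.read()
base64_str = base64.b64encode(file_content).decode("utf-8")
print(base64_str)  # ZXhhbXBsZSBieXRlcw==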

View File

@@ -423,6 +423,8 @@ class KlingTextToVideoNode(KlingNodeBase):
             "standard mode / 10s duration / kling-v2-master": ("std", "10", "kling-v2-master"),
             "pro mode / 5s duration / kling-v2-1-master": ("pro", "5", "kling-v2-1-master"),
             "pro mode / 10s duration / kling-v2-1-master": ("pro", "10", "kling-v2-1-master"),
+            "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"),
+            "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"),
         }

     @classmethod
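The two added entries extend the label-to-parameters table the node uses to turn a single combo selection into the (mode, duration, model_name) triple sent to the API. A minimal sketch of that lookup (the dict name is illustrative; the entries mirror the hunk above):

MODE_CONFIGS = {
    "pro mode / 5s duration / kling-v2-5-turbo": ("pro", "5", "kling-v2-5-turbo"),
    "pro mode / 10s duration / kling-v2-5-turbo": ("pro", "10", "kling-v2-5-turbo"),
}

mode, duration, model_name = MODE_CONFIGS["pro mode / 5s duration / kling-v2-5-turbo"]
print(mode, duration, model_name)  # pro 5 kling-v2-5-turbo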

View File

@@ -1,5 +1,7 @@
 from inspect import cleandoc
 from typing import Optional
+from typing_extensions import override
+from io import BytesIO
 from comfy_api_nodes.apis.pixverse_api import (
     PixverseTextVideoRequest,
     PixverseImageVideoRequest,
@@ -26,12 +28,11 @@ from comfy_api_nodes.apinode_utils import (
     tensor_to_bytesio,
     validate_string,
 )
-from comfy.comfy_types.node_typing import IO, ComfyNodeABC
 from comfy_api.input_impl import VideoFromFile
+from comfy_api.latest import ComfyExtension, io as comfy_io
 import torch
 import aiohttp
-from io import BytesIO


 AVERAGE_DURATION_T2V = 32
@@ -72,100 +73,101 @@ async def upload_image_to_pixverse(image: torch.Tensor, auth_kwargs=None):
     return response_upload.Resp.img_id


-class PixverseTemplateNode:
+class PixverseTemplateNode(comfy_io.ComfyNode):
     """
     Select template for PixVerse Video generation.
     """

-    RETURN_TYPES = (PixverseIO.TEMPLATE,)
-    RETURN_NAMES = ("pixverse_template",)
-    FUNCTION = "create_template"
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseTemplateNode",
+            display_name="PixVerse Template",
+            category="api node/video/PixVerse",
+            inputs=[
+                comfy_io.Combo.Input("template", options=[list(pixverse_templates.keys())]),
+            ],
+            outputs=[comfy_io.Custom(PixverseIO.TEMPLATE).Output(display_name="pixverse_template")],
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "template": (list(pixverse_templates.keys()),),
-            }
-        }
-
-    def create_template(self, template: str):
+    def execute(cls, template: str) -> comfy_io.NodeOutput:
         template_id = pixverse_templates.get(template, None)
         if template_id is None:
             raise Exception(f"Template '{template}' is not recognized.")
         # just return the integer
-        return (template_id,)
+        return comfy_io.NodeOutput(template_id)


-class PixverseTextToVideoNode(ComfyNodeABC):
+class PixverseTextToVideoNode(comfy_io.ComfyNode):
     """
     Generates videos based on prompt and output_size.
     """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseTextToVideoNode",
+            display_name="PixVerse Text to Video",
+            category="api node/video/PixVerse",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "aspect_ratio",
+                    options=[ratio.value for ratio in PixverseAspectRatio],
+                ),
+                comfy_io.Combo.Input(
+                    "quality",
+                    options=[resolution.value for resolution in PixverseQuality],
+                    default=PixverseQuality.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration_seconds",
+                    options=[dur.value for dur in PixverseDuration],
+                ),
+                comfy_io.Combo.Input(
+                    "motion_mode",
+                    options=[mode.value for mode in PixverseMotionMode],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    control_after_generate=True,
+                    tooltip="Seed for video generation.",
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    default="",
+                    force_input=True,
+                    tooltip="An optional text description of undesired elements on an image.",
+                    optional=True,
+                ),
+                comfy_io.Custom(PixverseIO.TEMPLATE).Input(
+                    "pixverse_template",
+                    tooltip="An optional template to influence style of generation, created by the PixVerse Template node.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "aspect_ratio": ([ratio.value for ratio in PixverseAspectRatio],),
-                "quality": (
-                    [resolution.value for resolution in PixverseQuality],
-                    {
-                        "default": PixverseQuality.res_540p,
-                    },
-                ),
-                "duration_seconds": ([dur.value for dur in PixverseDuration],),
-                "motion_mode": ([mode.value for mode in PixverseMotionMode],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 2147483647,
-                        "control_after_generate": True,
-                        "tooltip": "Seed for video generation.",
-                    },
-                ),
-            },
-            "optional": {
-                "negative_prompt": (
-                    IO.STRING,
-                    {
-                        "default": "",
-                        "forceInput": True,
-                        "tooltip": "An optional text description of undesired elements on an image.",
-                    },
-                ),
-                "pixverse_template": (
-                    PixverseIO.TEMPLATE,
-                    {
-                        "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         aspect_ratio: str,
         quality: str,
@@ -174,9 +176,7 @@ class PixverseTextToVideoNode(ComfyNodeABC):
         seed,
         negative_prompt: str = None,
         pixverse_template: int = None,
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         validate_string(prompt, strip_whitespace=False)
         # 1080p is limited to 5 seconds duration
         # only normal motion_mode supported for 1080p or for non-5 second duration
@@ -186,6 +186,10 @@ class PixverseTextToVideoNode(ComfyNodeABC):
         elif duration_seconds != PixverseDuration.dur_5:
             motion_mode = PixverseMotionMode.normal

+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
         operation = SynchronousOperation(
             endpoint=ApiEndpoint(
                 path="/proxy/pixverse/video/text/generate",
@@ -203,7 +207,7 @@ class PixverseTextToVideoNode(ComfyNodeABC):
                 template_id=pixverse_template,
                 seed=seed,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
         )
         response_api = await operation.execute()
@@ -224,8 +228,8 @@ class PixverseTextToVideoNode(ComfyNodeABC):
                 PixverseStatus.deleted,
             ],
             status_extractor=lambda x: x.Resp.status,
-            auth_kwargs=kwargs,
-            node_id=unique_id,
+            auth_kwargs=auth,
+            node_id=cls.hidden.unique_id,
             result_url_extractor=get_video_url_from_response,
             estimated_duration=AVERAGE_DURATION_T2V,
         )
@@ -233,77 +237,75 @@ class PixverseTextToVideoNode(ComfyNodeABC):
         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.Resp.url) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-class PixverseImageToVideoNode(ComfyNodeABC):
+class PixverseImageToVideoNode(comfy_io.ComfyNode):
     """
     Generates videos based on prompt and output_size.
     """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseImageToVideoNode",
+            display_name="PixVerse Image to Video",
+            category="api node/video/PixVerse",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input("image"),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "quality",
+                    options=[resolution.value for resolution in PixverseQuality],
+                    default=PixverseQuality.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration_seconds",
+                    options=[dur.value for dur in PixverseDuration],
+                ),
+                comfy_io.Combo.Input(
+                    "motion_mode",
+                    options=[mode.value for mode in PixverseMotionMode],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    control_after_generate=True,
+                    tooltip="Seed for video generation.",
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    default="",
+                    force_input=True,
+                    tooltip="An optional text description of undesired elements on an image.",
+                    optional=True,
+                ),
+                comfy_io.Custom(PixverseIO.TEMPLATE).Input(
+                    "pixverse_template",
+                    tooltip="An optional template to influence style of generation, created by the PixVerse Template node.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": (IO.IMAGE,),
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "quality": (
-                    [resolution.value for resolution in PixverseQuality],
-                    {
-                        "default": PixverseQuality.res_540p,
-                    },
-                ),
-                "duration_seconds": ([dur.value for dur in PixverseDuration],),
-                "motion_mode": ([mode.value for mode in PixverseMotionMode],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 2147483647,
-                        "control_after_generate": True,
-                        "tooltip": "Seed for video generation.",
-                    },
-                ),
-            },
-            "optional": {
-                "negative_prompt": (
-                    IO.STRING,
-                    {
-                        "default": "",
-                        "forceInput": True,
-                        "tooltip": "An optional text description of undesired elements on an image.",
-                    },
-                ),
-                "pixverse_template": (
-                    PixverseIO.TEMPLATE,
-                    {
-                        "tooltip": "An optional template to influence style of generation, created by the PixVerse Template node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
         image: torch.Tensor,
         prompt: str,
         quality: str,
@@ -312,11 +314,13 @@ class PixverseImageToVideoNode(ComfyNodeABC):
         seed,
         negative_prompt: str = None,
         pixverse_template: int = None,
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         validate_string(prompt, strip_whitespace=False)
-        img_id = await upload_image_to_pixverse(image, auth_kwargs=kwargs)
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
+        img_id = await upload_image_to_pixverse(image, auth_kwargs=auth)

         # 1080p is limited to 5 seconds duration
         # only normal motion_mode supported for 1080p or for non-5 second duration
@@ -343,7 +347,7 @@ class PixverseImageToVideoNode(ComfyNodeABC):
                 template_id=pixverse_template,
                 seed=seed,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
         )
         response_api = await operation.execute()
@@ -364,8 +368,8 @@ class PixverseImageToVideoNode(ComfyNodeABC):
                 PixverseStatus.deleted,
             ],
             status_extractor=lambda x: x.Resp.status,
-            auth_kwargs=kwargs,
-            node_id=unique_id,
+            auth_kwargs=auth,
+            node_id=cls.hidden.unique_id,
             result_url_extractor=get_video_url_from_response,
             estimated_duration=AVERAGE_DURATION_I2V,
         )
@@ -373,72 +377,71 @@ class PixverseImageToVideoNode(ComfyNodeABC):
         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.Resp.url) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-class PixverseTransitionVideoNode(ComfyNodeABC):
+class PixverseTransitionVideoNode(comfy_io.ComfyNode):
     """
     Generates videos based on prompt and output_size.
     """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/PixVerse"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="PixverseTransitionVideoNode",
+            display_name="PixVerse Transition Video",
+            category="api node/video/PixVerse",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input("first_frame"),
+                comfy_io.Image.Input("last_frame"),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "quality",
+                    options=[resolution.value for resolution in PixverseQuality],
+                    default=PixverseQuality.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration_seconds",
+                    options=[dur.value for dur in PixverseDuration],
+                ),
+                comfy_io.Combo.Input(
+                    "motion_mode",
+                    options=[mode.value for mode in PixverseMotionMode],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    control_after_generate=True,
+                    tooltip="Seed for video generation.",
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    default="",
+                    force_input=True,
+                    tooltip="An optional text description of undesired elements on an image.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "first_frame": (IO.IMAGE,),
-                "last_frame": (IO.IMAGE,),
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "quality": (
-                    [resolution.value for resolution in PixverseQuality],
-                    {
-                        "default": PixverseQuality.res_540p,
-                    },
-                ),
-                "duration_seconds": ([dur.value for dur in PixverseDuration],),
-                "motion_mode": ([mode.value for mode in PixverseMotionMode],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 2147483647,
-                        "control_after_generate": True,
-                        "tooltip": "Seed for video generation.",
-                    },
-                ),
-            },
-            "optional": {
-                "negative_prompt": (
-                    IO.STRING,
-                    {
-                        "default": "",
-                        "forceInput": True,
-                        "tooltip": "An optional text description of undesired elements on an image.",
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
         first_frame: torch.Tensor,
         last_frame: torch.Tensor,
         prompt: str,
@@ -447,12 +450,14 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
         motion_mode: str,
         seed,
         negative_prompt: str = None,
-        unique_id: Optional[str] = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         validate_string(prompt, strip_whitespace=False)
-        first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=kwargs)
-        last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=kwargs)
+        auth = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
+        first_frame_id = await upload_image_to_pixverse(first_frame, auth_kwargs=auth)
+        last_frame_id = await upload_image_to_pixverse(last_frame, auth_kwargs=auth)

         # 1080p is limited to 5 seconds duration
         # only normal motion_mode supported for 1080p or for non-5 second duration
@@ -479,7 +484,7 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
                 negative_prompt=negative_prompt if negative_prompt else None,
                 seed=seed,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth,
         )
         response_api = await operation.execute()
@@ -500,8 +505,8 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
                 PixverseStatus.deleted,
             ],
             status_extractor=lambda x: x.Resp.status,
-            auth_kwargs=kwargs,
-            node_id=unique_id,
+            auth_kwargs=auth,
+            node_id=cls.hidden.unique_id,
             result_url_extractor=get_video_url_from_response,
             estimated_duration=AVERAGE_DURATION_T2V,
         )
@@ -509,19 +514,19 @@ class PixverseTransitionVideoNode(ComfyNodeABC):
         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.Resp.url) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-NODE_CLASS_MAPPINGS = {
-    "PixverseTextToVideoNode": PixverseTextToVideoNode,
-    "PixverseImageToVideoNode": PixverseImageToVideoNode,
-    "PixverseTransitionVideoNode": PixverseTransitionVideoNode,
-    "PixverseTemplateNode": PixverseTemplateNode,
-}
+class PixVerseExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+        return [
+            PixverseTextToVideoNode,
+            PixverseImageToVideoNode,
+            PixverseTransitionVideoNode,
+            PixverseTemplateNode,
+        ]

-NODE_DISPLAY_NAME_MAPPINGS = {
-    "PixverseTextToVideoNode": "PixVerse Text to Video",
-    "PixverseImageToVideoNode": "PixVerse Image to Video",
-    "PixverseTransitionVideoNode": "PixVerse Transition Video",
-    "PixverseTemplateNode": "PixVerse Template",
-}
+
+async def comfy_entrypoint() -> PixVerseExtension:
+    return PixVerseExtension()

View File

@@ -1,26 +1,38 @@
 import node_helpers
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io

-class ReferenceLatent:
+
+class ReferenceLatent(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"conditioning": ("CONDITIONING", ),
-                             },
-                "optional": {"latent": ("LATENT", ),}
-                }
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ReferenceLatent",
+            category="advanced/conditioning/edit_models",
+            description="This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images.",
+            inputs=[
+                io.Conditioning.Input("conditioning"),
+                io.Latent.Input("latent", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(),
+            ]
+        )

-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "append"
-
-    CATEGORY = "advanced/conditioning/edit_models"
-    DESCRIPTION = "This node sets the guiding latent for an edit model. If the model supports it you can chain multiple to set multiple reference images."
-
-    def append(self, conditioning, latent=None):
+    @classmethod
+    def execute(cls, conditioning, latent=None) -> io.NodeOutput:
         if latent is not None:
             conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [latent["samples"]]}, append=True)
-        return (conditioning, )
+        return io.NodeOutput(conditioning)

-NODE_CLASS_MAPPINGS = {
-    "ReferenceLatent": ReferenceLatent,
-}
+
+class EditModelExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            ReferenceLatent,
+        ]
+
+
+def comfy_entrypoint() -> EditModelExtension:
+    return EditModelExtension()

View File

@@ -1,4 +1,9 @@
-class EpsilonScaling:
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
+
+
+class EpsilonScaling(io.ComfyNode):
     """
     Implements the Epsilon Scaling method from 'Elucidating the Exposure Bias in Diffusion Models'
     (https://arxiv.org/abs/2308.15321v6).
@@ -8,26 +13,28 @@ class EpsilonScaling:
     recommended by the paper for its practicality and effectiveness.
     """
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "model": ("MODEL",),
-                "scaling_factor": ("FLOAT", {
-                    "default": 1.005,
-                    "min": 0.5,
-                    "max": 1.5,
-                    "step": 0.001,
-                    "display": "number"
-                }),
-            }
-        }
+    def define_schema(cls):
+        return io.Schema(
+            node_id="Epsilon Scaling",
+            category="model_patches/unet",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input(
+                    "scaling_factor",
+                    default=1.005,
+                    min=0.5,
+                    max=1.5,
+                    step=0.001,
+                    display_mode=io.NumberDisplay.number,
+                ),
+            ],
+            outputs=[
+                io.Model.Output(),
+            ],
+        )

-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
-    CATEGORY = "model_patches/unet"
-
-    def patch(self, model, scaling_factor):
+    @classmethod
+    def execute(cls, model, scaling_factor) -> io.NodeOutput:
         # Prevent division by zero, though the UI's min value should prevent this.
         if scaling_factor == 0:
             scaling_factor = 1e-9
@@ -53,8 +60,15 @@ class EpsilonScaling:
         model_clone.set_model_sampler_post_cfg_function(epsilon_scaling_function)

-        return (model_clone,)
+        return io.NodeOutput(model_clone)

-NODE_CLASS_MAPPINGS = {
-    "Epsilon Scaling": EpsilonScaling
-}
+
+class EpsilonScalingExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            EpsilonScaling,
+        ]
+
+
+async def comfy_entrypoint() -> EpsilonScalingExtension:
+    return EpsilonScalingExtension()

View File

@@ -1,7 +1,9 @@
 #Taken from: https://github.com/dbolya/tomesd
 import torch
-from typing import Tuple, Callable
+from typing import Tuple, Callable, Optional
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
 import math


 def do_nothing(x: torch.Tensor, mode:str=None):
@@ -144,33 +146,45 @@ def get_functions(x, ratio, original_shape):

-class TomePatchModel:
+class TomePatchModel(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "model": ("MODEL",),
-                              "ratio": ("FLOAT", {"default": 0.3, "min": 0.0, "max": 1.0, "step": 0.01}),
-                              }}
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
-
-    CATEGORY = "model_patches/unet"
-
-    def patch(self, model, ratio):
-        self.u = None
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TomePatchModel",
+            category="model_patches/unet",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input("ratio", default=0.3, min=0.0, max=1.0, step=0.01),
+            ],
+            outputs=[io.Model.Output()],
+        )
+
+    @classmethod
+    def execute(cls, model, ratio) -> io.NodeOutput:
+        u: Optional[Callable] = None
         def tomesd_m(q, k, v, extra_options):
+            nonlocal u
             #NOTE: In the reference code get_functions takes x (input of the transformer block) as the argument instead of q
             #however from my basic testing it seems that using q instead gives better results
-            m, self.u = get_functions(q, ratio, extra_options["original_shape"])
+            m, u = get_functions(q, ratio, extra_options["original_shape"])
             return m(q), k, v
         def tomesd_u(n, extra_options):
-            return self.u(n)
+            nonlocal u
+            return u(n)
         m = model.clone()
         m.set_model_attn1_patch(tomesd_m)
         m.set_model_attn1_output_patch(tomesd_u)
-        return (m, )
+        return io.NodeOutput(m)

-NODE_CLASS_MAPPINGS = {
-    "TomePatchModel": TomePatchModel,
-}
+
+class TomePatchModelExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            TomePatchModel,
+        ]
+
+
+async def comfy_entrypoint() -> TomePatchModelExtension:
+    return TomePatchModelExtension()
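Besides the schema conversion, the hunk above replaces the instance attribute `self.u` (unavailable once `execute` is a classmethod) with a `nonlocal` variable shared by the two inner patch functions. A standalone sketch of that closure pattern (function names and bodies are illustrative, not the actual ToMe math):

from typing import Callable, Optional

def make_patches():
    u: Optional[Callable] = None  # shared between the two callbacks, no instance state

    def merge(x):
        nonlocal u
        def unmerge_impl(n):
            return n + 1  # stand-in for the unmerge function returned by get_functions()
        u = unmerge_impl
        return x * 2

    def unmerge(n):
        nonlocal u
        return u(n)

    return merge, unmerge

merge, unmerge = make_patches()
print(merge(3))    # 6
print(unmerge(3))  # 4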

View File

@@ -1,23 +1,39 @@
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
 from comfy_api.torch_helpers import set_torch_compile_wrapper


-class TorchCompileModel:
+class TorchCompileModel(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "model": ("MODEL",),
-                              "backend": (["inductor", "cudagraphs"],),
-                              }}
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="TorchCompileModel",
+            category="_for_testing",
+            inputs=[
+                io.Model.Input("model"),
+                io.Combo.Input(
+                    "backend",
+                    options=["inductor", "cudagraphs"],
+                ),
+            ],
+            outputs=[io.Model.Output()],
+            is_experimental=True,
+        )

-    CATEGORY = "_for_testing"
-    EXPERIMENTAL = True
-
-    def patch(self, model, backend):
+    @classmethod
+    def execute(cls, model, backend) -> io.NodeOutput:
         m = model.clone()
         set_torch_compile_wrapper(model=m, backend=backend)
-        return (m, )
+        return io.NodeOutput(m)

-NODE_CLASS_MAPPINGS = {
-    "TorchCompileModel": TorchCompileModel,
-}
+
+class TorchCompileExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            TorchCompileModel,
+        ]
+
+
+async def comfy_entrypoint() -> TorchCompileExtension:
+    return TorchCompileExtension()

View File

@@ -65,7 +65,6 @@ messages_control.disable = [
     "unnecessary-pass",
     "unidiomatic-typecheck",
     "unnecessary-lambda-assignment",
-    "bad-indentation",
     "no-else-return",
     "no-else-raise",
     "invalid-overridden-method",
@@ -73,6 +72,5 @@ messages_control.disable = [
     "pointless-string-statement",
     "inconsistent-return-statements",
     "import-outside-toplevel",
-    "reimported",
     "redefined-outer-name",
 ]