Merge branch 'master' into to-sora-patch-1

to-sora 2026-01-28 14:08:29 +08:00 committed by GitHub
commit 4692d9dbe9
19 changed files with 1092 additions and 136 deletions

View File

@@ -20,7 +20,7 @@ jobs:
git_tag: ${{ inputs.git_tag }}
cache_tag: "cu130"
python_minor: "13"
python_patch: "9"
python_patch: "11"
rel_name: "nvidia"
rel_extra_name: ""
test_release: true
@@ -65,11 +65,11 @@ jobs:
contents: "write"
packages: "write"
pull-requests: "read"
name: "Release AMD ROCm 7.1.1"
name: "Release AMD ROCm 7.2"
uses: ./.github/workflows/stable-release.yml
with:
git_tag: ${{ inputs.git_tag }}
cache_tag: "rocm711"
cache_tag: "rocm72"
python_minor: "12"
python_patch: "10"
rel_name: "amd"

View File

@@ -208,7 +208,7 @@ comfy install
## Manual Install (Windows, Linux)
Python 3.14 works but you may encounter issues with the torch compile node. The free threaded variant is still missing some dependencies.
Python 3.14 works but some custom nodes may have issues. The free-threaded variant works, but some dependencies will re-enable the GIL, so it is not fully supported.
Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13, you can try 3.12.
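For example, a minimal 3.13 setup with venv (illustrative commands, assuming a standard pip install from the repo root):

python3.13 -m venv venv
source venv/bin/activate
pip install -r requirements.txt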

View File

@@ -236,6 +236,8 @@ class ComfyNodeABC(ABC):
"""Flags a node as experimental, informing users that it may change or not work as expected."""
DEPRECATED: bool
"""Flags a node as deprecated, indicating to users that they should find alternatives to this node."""
DEV_ONLY: bool
"""Flags a node as dev-only, hiding it from search/menus unless dev mode is enabled."""
API_NODE: Optional[bool]
"""Flags a node as an API node. See: https://docs.comfy.org/tutorials/api-nodes/overview."""

View File

@@ -479,10 +479,12 @@ class WanVAE(nn.Module):
def encode(self, x):
conv_idx = [0]
feat_map = [None] * count_conv3d(self.decoder)
## cache
t = x.shape[2]
iter_ = 1 + (t - 1) // 4
feat_map = None
if iter_ > 1:
feat_map = [None] * count_conv3d(self.decoder)
## split the encode input x along the time axis into chunks of 1, 4, 4, 4, ...
for i in range(iter_):
conv_idx = [0]
@@ -502,10 +504,11 @@ class WanVAE(nn.Module):
def decode(self, z):
conv_idx = [0]
feat_map = [None] * count_conv3d(self.decoder)
# z: [b,c,t,h,w]
iter_ = z.shape[2]
feat_map = None
if iter_ > 1:
feat_map = [None] * count_conv3d(self.decoder)
x = self.conv2(z)
for i in range(iter_):
conv_idx = [0]
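The chunk arithmetic above splits the t input frames into one chunk of 1 frame followed by chunks of 4, so the conv feature cache is only allocated when more than one chunk exists. A quick sanity check of the formula (illustrative values):

# iter_ = 1 + (t - 1) // 4
# t = 1  -> iter_ = 1   (single chunk, feat_map stays None)
# t = 17 -> iter_ = 5   (chunks of 1, 4, 4, 4, 4; cache allocated)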

View File

@@ -466,7 +466,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
return embed_out
class SDTokenizer:
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, pad_left=False, disable_weights=False, tokenizer_data={}, tokenizer_args={}):
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, start_token=None, min_padding=None, pad_left=False, disable_weights=False, tokenizer_data={}, tokenizer_args={}):
if tokenizer_path is None:
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
@@ -479,8 +479,15 @@ class SDTokenizer:
empty = self.tokenizer('')["input_ids"]
self.tokenizer_adds_end_token = has_end_token
if has_start_token:
self.tokens_start = 1
self.start_token = empty[0]
if len(empty) > 0:
self.tokens_start = 1
self.start_token = empty[0]
else:
self.tokens_start = 0
self.start_token = start_token
if start_token is None:
logging.warning("WARNING: There's something wrong with your tokenizers.'")
if end_token is not None:
self.end_token = end_token
else:
@@ -488,7 +495,7 @@ class SDTokenizer:
self.end_token = empty[1]
else:
self.tokens_start = 0
self.start_token = None
self.start_token = start_token
if end_token is not None:
self.end_token = end_token
else:
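The fallback above matters for tokenizers whose empty-string encoding yields no ids at all. A minimal sketch of the two cases (illustrative id values):

# CLIP-style: tokenizer('')["input_ids"] == [49406, 49407], so empty[0]/empty[1] recover start/end
# tekken-style: tokenizer('')["input_ids"] == [], so start_token must be passed explicitly
# (Mistral3Tokenizer below passes start_token=1 for exactly this reason)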

View File

@@ -118,7 +118,7 @@ class MistralTokenizerClass:
class Mistral3Tokenizer(sd1_clip.SDTokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}):
self.tekken_data = tokenizer_data.get("tekken_model", None)
super().__init__("", pad_with_end=False, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_class=MistralTokenizerClass, has_end_token=False, pad_to_max_length=False, pad_token=11, max_length=99999999, min_length=1, pad_left=True, tokenizer_args=load_mistral_tokenizer(self.tekken_data), tokenizer_data=tokenizer_data)
super().__init__("", pad_with_end=False, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_class=MistralTokenizerClass, has_end_token=False, pad_to_max_length=False, pad_token=11, start_token=1, max_length=99999999, min_length=1, pad_left=True, tokenizer_args=load_mistral_tokenizer(self.tekken_data), tokenizer_data=tokenizer_data)
def state_dict(self):
return {"tekken_model": self.tekken_data}

View File

@@ -1247,6 +1247,7 @@ class NodeInfoV1:
output_node: bool=None
deprecated: bool=None
experimental: bool=None
dev_only: bool=None
api_node: bool=None
price_badge: dict | None = None
search_aliases: list[str]=None
@@ -1264,6 +1265,7 @@ class NodeInfoV3:
output_node: bool=None
deprecated: bool=None
experimental: bool=None
dev_only: bool=None
api_node: bool=None
price_badge: dict | None = None
@@ -1375,6 +1377,8 @@ class Schema:
"""Flags a node as deprecated, indicating to users that they should find alternatives to this node."""
is_experimental: bool=False
"""Flags a node as experimental, informing users that it may change or not work as expected."""
is_dev_only: bool=False
"""Flags a node as dev-only, hiding it from search/menus unless dev mode is enabled."""
is_api_node: bool=False
"""Flags a node as an API node. See: https://docs.comfy.org/tutorials/api-nodes/overview."""
price_badge: PriceBadge | None = None
@@ -1485,6 +1489,7 @@ class Schema:
output_node=self.is_output_node,
deprecated=self.is_deprecated,
experimental=self.is_experimental,
dev_only=self.is_dev_only,
api_node=self.is_api_node,
python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
@@ -1519,6 +1524,7 @@ class Schema:
output_node=self.is_output_node,
deprecated=self.is_deprecated,
experimental=self.is_experimental,
dev_only=self.is_dev_only,
api_node=self.is_api_node,
python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
@@ -1791,6 +1797,14 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
cls.GET_SCHEMA()
return cls._DEPRECATED
_DEV_ONLY = None
@final
@classproperty
def DEV_ONLY(cls): # noqa
if cls._DEV_ONLY is None:
cls.GET_SCHEMA()
return cls._DEV_ONLY
_API_NODE = None
@final
@classproperty
@@ -1893,6 +1907,8 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
cls._EXPERIMENTAL = schema.is_experimental
if cls._DEPRECATED is None:
cls._DEPRECATED = schema.is_deprecated
if cls._DEV_ONLY is None:
cls._DEV_ONLY = schema.is_dev_only
if cls._API_NODE is None:
cls._API_NODE = schema.is_api_node
if cls._OUTPUT_NODE is None:

View File

@@ -13,17 +13,6 @@ class Text2ImageTaskCreationRequest(BaseModel):
watermark: bool | None = Field(False)
class Image2ImageTaskCreationRequest(BaseModel):
model: str = Field(...)
prompt: str = Field(...)
response_format: str | None = Field("url")
image: str = Field(..., description="Base64 encoded string or image URL")
size: str | None = Field("adaptive")
seed: int | None = Field(..., ge=0, le=2147483647)
guidance_scale: float | None = Field(..., ge=1.0, le=10.0)
watermark: bool | None = Field(False)
class Seedream4Options(BaseModel):
max_images: int = Field(15)

View File

@@ -0,0 +1,122 @@
from typing import TypedDict
from pydantic import AliasChoices, BaseModel, Field, model_validator
class InputPortraitMode(TypedDict):
portrait_mode: str
portrait_style: str
portrait_beautifier: str
class InputAdvancedSettings(TypedDict):
advanced_settings: str
whites: int
blacks: int
brightness: int
contrast: int
saturation: int
engine: str
transfer_light_a: str
transfer_light_b: str
fixed_generation: bool
class InputSkinEnhancerMode(TypedDict):
mode: str
skin_detail: int
optimized_for: str
class ImageUpscalerCreativeRequest(BaseModel):
image: str = Field(...)
scale_factor: str = Field(...)
optimized_for: str = Field(...)
prompt: str | None = Field(None)
creativity: int = Field(...)
hdr: int = Field(...)
resemblance: int = Field(...)
fractality: int = Field(...)
engine: str = Field(...)
class ImageUpscalerPrecisionV2Request(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
ultra_detail: int = Field(...)
flavor: str = Field(...)
scale_factor: int = Field(...)
class ImageRelightAdvancedSettingsRequest(BaseModel):
whites: int = Field(...)
blacks: int = Field(...)
brightness: int = Field(...)
contrast: int = Field(...)
saturation: int = Field(...)
engine: str = Field(...)
transfer_light_a: str = Field(...)
transfer_light_b: str = Field(...)
fixed_generation: bool = Field(...)
class ImageRelightRequest(BaseModel):
image: str = Field(...)
prompt: str | None = Field(None)
transfer_light_from_reference_image: str | None = Field(None)
light_transfer_strength: int = Field(...)
interpolate_from_original: bool = Field(...)
change_background: bool = Field(...)
style: str = Field(...)
preserve_details: bool = Field(...)
advanced_settings: ImageRelightAdvancedSettingsRequest | None = Field(...)
class ImageStyleTransferRequest(BaseModel):
image: str = Field(...)
reference_image: str = Field(...)
prompt: str | None = Field(None)
style_strength: int = Field(...)
structure_strength: int = Field(...)
is_portrait: bool = Field(...)
portrait_style: str | None = Field(...)
portrait_beautifier: str | None = Field(...)
flavor: str = Field(...)
engine: str = Field(...)
fixed_generation: bool = Field(...)
class ImageSkinEnhancerCreativeRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
class ImageSkinEnhancerFaithfulRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
skin_detail: int = Field(...)
class ImageSkinEnhancerFlexibleRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
optimized_for: str = Field(...)
class TaskResponse(BaseModel):
"""Unified response model that handles both wrapped and unwrapped API responses."""
task_id: str = Field(...)
status: str = Field(validation_alias=AliasChoices("status", "task_status"))
generated: list[str] | None = Field(None)
@model_validator(mode="before")
@classmethod
def unwrap_data(cls, values: dict) -> dict:
if "data" in values and isinstance(values["data"], dict):
return values["data"]
return values
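A minimal sketch of the two payload shapes the validator accepts (assumed shapes for illustration, not taken from the upstream API docs):

from comfy_api_nodes.apis.magnific import TaskResponse  # import path as used in the node file below

wrapped = {"data": {"task_id": "abc", "task_status": "IN_PROGRESS"}}
flat = {"task_id": "abc", "status": "COMPLETED", "generated": ["https://example.com/img.png"]}
assert TaskResponse.model_validate(wrapped).status == "IN_PROGRESS"
assert TaskResponse.model_validate(flat).generated == ["https://example.com/img.png"]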

View File

@@ -9,7 +9,6 @@ from comfy_api_nodes.apis.bytedance import (
RECOMMENDED_PRESETS,
RECOMMENDED_PRESETS_SEEDREAM_4,
VIDEO_TASKS_EXECUTION_TIME,
Image2ImageTaskCreationRequest,
Image2VideoTaskCreationRequest,
ImageTaskCreationResponse,
Seedream4Options,
@@ -174,99 +173,6 @@ class ByteDanceImageNode(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
class ByteDanceImageEditNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="ByteDanceImageEditNode",
display_name="ByteDance Image Edit",
category="api node/image/ByteDance",
description="Edit images using ByteDance models via api based on prompt",
inputs=[
IO.Combo.Input("model", options=["seededit-3-0-i2i-250628"]),
IO.Image.Input(
"image",
tooltip="The base image to edit",
),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Instruction to edit image",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation",
optional=True,
),
IO.Float.Input(
"guidance_scale",
default=5.5,
min=1.0,
max=10.0,
step=0.01,
display_mode=IO.NumberDisplay.number,
tooltip="Higher value makes the image follow the prompt more closely",
optional=True,
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip='Whether to add an "AI generated" watermark to the image',
optional=True,
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
is_deprecated=True,
)
@classmethod
async def execute(
cls,
model: str,
image: Input.Image,
prompt: str,
seed: int,
guidance_scale: float,
watermark: bool,
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=True, min_length=1)
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1))
source_url = (await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0]
payload = Image2ImageTaskCreationRequest(
model=model,
prompt=prompt,
image=source_url,
seed=seed,
guidance_scale=guidance_scale,
watermark=watermark,
)
response = await sync_op(
cls,
ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"),
data=payload,
response_model=ImageTaskCreationResponse,
)
return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
class ByteDanceSeedreamNode(IO.ComfyNode):
@classmethod
@@ -1101,7 +1007,6 @@ class ByteDanceExtension(ComfyExtension):
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [
ByteDanceImageNode,
ByteDanceImageEditNode,
ByteDanceSeedreamNode,
ByteDanceTextToVideoNode,
ByteDanceImageToVideoNode,

View File

@@ -0,0 +1,889 @@
import math
from typing_extensions import override
from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.magnific import (
ImageRelightAdvancedSettingsRequest,
ImageRelightRequest,
ImageSkinEnhancerCreativeRequest,
ImageSkinEnhancerFaithfulRequest,
ImageSkinEnhancerFlexibleRequest,
ImageStyleTransferRequest,
ImageUpscalerCreativeRequest,
ImageUpscalerPrecisionV2Request,
InputAdvancedSettings,
InputPortraitMode,
InputSkinEnhancerMode,
TaskResponse,
)
from comfy_api_nodes.util import (
ApiEndpoint,
download_url_to_image_tensor,
downscale_image_tensor,
get_image_dimensions,
get_number_of_images,
poll_op,
sync_op,
upload_images_to_comfyapi,
validate_image_aspect_ratio,
validate_image_dimensions,
)
class MagnificImageUpscalerCreativeNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageUpscalerCreativeNode",
display_name="Magnific Image Upscale (Creative)",
category="api node/image/Magnific",
description="Promptguided enhancement, stylization, and 2x/4x/8x/16x upscaling. "
"Maximum output: 25.3 megapixels.",
inputs=[
IO.Image.Input("image"),
IO.String.Input("prompt", multiline=True, default=""),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
IO.Combo.Input(
"optimized_for",
options=[
"standard",
"soft_portraits",
"hard_portraits",
"art_n_illustration",
"videogame_assets",
"nature_n_landscapes",
"films_n_photography",
"3d_renders",
"science_fiction_n_horror",
],
),
IO.Int.Input("creativity", min=-10, max=10, default=0, display_mode=IO.NumberDisplay.slider),
IO.Int.Input(
"hdr",
min=-10,
max=10,
default=0,
tooltip="The level of definition and detail.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"resemblance",
min=-10,
max=10,
default=0,
tooltip="The level of resemblance to the original image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"fractality",
min=-10,
max=10,
default=0,
tooltip="The strength of the prompt and intricacy per square pixel.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"engine",
options=["automatic", "magnific_illusio", "magnific_sharpy", "magnific_sparkle"],
),
IO.Boolean.Input(
"auto_downscale",
default=False,
tooltip="Automatically downscale input image if output would exceed maximum pixel limit.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
expr="""
(
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
prompt: str,
scale_factor: str,
optimized_for: str,
creativity: int,
hdr: int,
resemblance: int,
fractality: int,
engine: str,
auto_downscale: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
max_output_pixels = 25_300_000
height, width = get_image_dimensions(image)
requested_scale = int(scale_factor.rstrip("x"))
output_pixels = height * width * requested_scale * requested_scale
if output_pixels > max_output_pixels:
if auto_downscale:
# Find optimal scale factor that doesn't require >2x downscale.
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
input_pixels = width * height
scale = 2
max_input_pixels = max_output_pixels // 4
for candidate in [16, 8, 4, 2]:
if candidate > requested_scale:
continue
scale_output_pixels = input_pixels * candidate * candidate
if scale_output_pixels <= max_output_pixels:
scale = candidate
max_input_pixels = None
break
downscale_ratio = math.sqrt(scale_output_pixels / max_output_pixels)
if downscale_ratio <= 2.0:
scale = candidate
max_input_pixels = max_output_pixels // (candidate * candidate)
break
if max_input_pixels is not None:
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
scale_factor = f"{scale}x"
else:
raise ValueError(
f"Output size ({width * requested_scale}x{height * requested_scale} = {output_pixels:,} pixels) "
f"exceeds maximum allowed size of {max_output_pixels:,} pixels. "
f"Use a smaller input image or lower scale factor."
)
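# Worked example of the branch above (illustrative numbers, not from the source):
# a 4000x3000 input at 4x would produce 12,000,000 * 16 = 192,000,000 output pixels.
# candidate 4 would need a sqrt(192e6 / 25.3e6) ≈ 2.75x downscale (> 2.0, rejected),
# while candidate 2 needs only ≈ 1.38x, so scale becomes 2 and the input is
# downscaled to at most 25,300,000 // 4 = 6,325,000 pixels before upload.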
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler", method="POST"),
response_model=TaskResponse,
data=ImageUpscalerCreativeRequest(
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
scale_factor=scale_factor,
optimized_for=optimized_for,
creativity=creativity,
hdr=hdr,
resemblance=resemblance,
fractality=fractality,
engine=engine,
prompt=prompt if prompt else None,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageUpscalerPreciseV2Node",
display_name="Magnific Image Upscale (Precise V2)",
category="api node/image/Magnific",
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
"Maximum output: 10060×10060 pixels.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
IO.Combo.Input(
"flavor",
options=["sublime", "photo", "photo_denoiser"],
tooltip="Processing style: "
"sublime for general use, photo for photographs, photo_denoiser for noisy photos.",
),
IO.Int.Input(
"sharpen",
min=0,
max=100,
default=7,
tooltip="Image sharpness intensity. Higher values increase edge definition and clarity.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"smart_grain",
min=0,
max=100,
default=7,
tooltip="Intelligent grain/texture enhancement to prevent the image from "
"looking too smooth or artificial.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"ultra_detail",
min=0,
max=100,
default=30,
tooltip="Controls fine detail, textures, and micro-details added during upscaling.",
display_mode=IO.NumberDisplay.slider,
),
IO.Boolean.Input(
"auto_downscale",
default=False,
tooltip="Automatically downscale input image if output would exceed maximum resolution.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
expr="""
(
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
scale_factor: str,
flavor: str,
sharpen: int,
smart_grain: int,
ultra_detail: int,
auto_downscale: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
max_output_dimension = 10060
height, width = get_image_dimensions(image)
requested_scale = int(scale_factor.strip("x"))
output_width = width * requested_scale
output_height = height * requested_scale
if output_width > max_output_dimension or output_height > max_output_dimension:
if auto_downscale:
# Find optimal scale factor that doesn't require >2x downscale.
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
max_dim = max(width, height)
scale = 2
max_input_dim = max_output_dimension // 2
scale_ratio = max_input_dim / max_dim
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
for candidate in [16, 8, 4, 2]:
if candidate > requested_scale:
continue
output_dim = max_dim * candidate
if output_dim <= max_output_dimension:
scale = candidate
max_input_pixels = None
break
downscale_ratio = output_dim / max_output_dimension
if downscale_ratio <= 2.0:
scale = candidate
max_input_dim = max_output_dimension // candidate
scale_ratio = max_input_dim / max_dim
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
break
if max_input_pixels is not None:
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
requested_scale = scale
else:
raise ValueError(
f"Output dimensions ({output_width}x{output_height}) exceed maximum allowed "
f"resolution of {max_output_dimension}x{max_output_dimension} pixels. "
f"Use a smaller input image or lower scale factor."
)
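# Worked example of the dimension check above (illustrative numbers, not from the
# source): an 8000x6000 input at 4x would produce 32000x24000. candidate 4 needs a
# 32000 / 10060 ≈ 3.18x downscale (> 2.0, rejected); candidate 2 needs ≈ 1.59x,
# so the image is downscaled until its longest side fits 10060 // 2 = 5030 and is
# then upscaled 2x.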
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler-precision-v2", method="POST"),
response_model=TaskResponse,
data=ImageUpscalerPrecisionV2Request(
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
scale_factor=requested_scale,
flavor=flavor,
sharpen=sharpen,
smart_grain=smart_grain,
ultra_detail=ultra_detail,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler-precision-v2/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageStyleTransferNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageStyleTransferNode",
display_name="Magnific Image Style Transfer",
category="api node/image/Magnific",
description="Transfer the style from a reference image to your input image.",
inputs=[
IO.Image.Input("image", tooltip="The image to apply style transfer to."),
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
IO.String.Input("prompt", multiline=True, default=""),
IO.Int.Input(
"style_strength",
min=0,
max=100,
default=100,
tooltip="Percentage of style strength.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"structure_strength",
min=0,
max=100,
default=50,
tooltip="Maintains the structure of the original image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"flavor",
options=["faithful", "gen_z", "psychedelia", "detaily", "clear", "donotstyle", "donotstyle_sharp"],
tooltip="Style transfer flavor.",
),
IO.Combo.Input(
"engine",
options=[
"balanced",
"definio",
"illusio",
"3d_cartoon",
"colorful_anime",
"caricature",
"real",
"super_real",
"softy",
],
tooltip="Processing engine selection.",
),
IO.DynamicCombo.Input(
"portrait_mode",
options=[
IO.DynamicCombo.Option("disabled", []),
IO.DynamicCombo.Option(
"enabled",
[
IO.Combo.Input(
"portrait_style",
options=["standard", "pop", "super_pop"],
tooltip="Visual style applied to portrait images.",
),
IO.Combo.Input(
"portrait_beautifier",
options=["none", "beautify_face", "beautify_face_max"],
tooltip="Facial beautification intensity on portraits.",
),
],
),
],
tooltip="Enable portrait mode for facial enhancements.",
),
IO.Boolean.Input(
"fixed_generation",
default=True,
tooltip="When disabled, expect each generation to introduce a degree of randomness, "
"leading to more diverse outcomes.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.11}""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
reference_image: Input.Image,
prompt: str,
style_strength: int,
structure_strength: int,
flavor: str,
engine: str,
portrait_mode: InputPortraitMode,
fixed_generation: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
if get_number_of_images(reference_image) != 1:
raise ValueError("Exactly one reference image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
validate_image_dimensions(reference_image, min_height=160, min_width=160)
is_portrait = portrait_mode["portrait_mode"] == "enabled"
portrait_style = portrait_mode.get("portrait_style", "standard")
portrait_beautifier = portrait_mode.get("portrait_beautifier", "none")
uploaded_urls = await upload_images_to_comfyapi(cls, [image, reference_image], max_images=2)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-style-transfer", method="POST"),
response_model=TaskResponse,
data=ImageStyleTransferRequest(
image=uploaded_urls[0],
reference_image=uploaded_urls[1],
prompt=prompt if prompt else None,
style_strength=style_strength,
structure_strength=structure_strength,
is_portrait=is_portrait,
portrait_style=portrait_style if is_portrait else None,
portrait_beautifier=portrait_beautifier if is_portrait and portrait_beautifier != "none" else None,
flavor=flavor,
engine=engine,
fixed_generation=fixed_generation,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-style-transfer/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageRelightNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageRelightNode",
display_name="Magnific Image Relight",
category="api node/image/Magnific",
description="Relight an image with lighting adjustments and optional reference-based light transfer.",
inputs=[
IO.Image.Input("image", tooltip="The image to relight."),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Descriptive guidance for lighting. Supports emphasis notation (1-1.4).",
),
IO.Int.Input(
"light_transfer_strength",
min=0,
max=100,
default=100,
tooltip="Intensity of light transfer application.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"style",
options=[
"standard",
"darker_but_realistic",
"clean",
"smooth",
"brighter",
"contrasted_n_hdr",
"just_composition",
],
tooltip="Stylistic output preference.",
),
IO.Boolean.Input(
"interpolate_from_original",
default=False,
tooltip="Restricts generation freedom to match original more closely.",
),
IO.Boolean.Input(
"change_background",
default=True,
tooltip="Modifies background based on prompt/reference.",
),
IO.Boolean.Input(
"preserve_details",
default=True,
tooltip="Maintains texture and fine details from original.",
),
IO.DynamicCombo.Input(
"advanced_settings",
options=[
IO.DynamicCombo.Option("disabled", []),
IO.DynamicCombo.Option(
"enabled",
[
IO.Int.Input(
"whites",
min=0,
max=100,
default=50,
tooltip="Adjusts the brightest tones in the image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"blacks",
min=0,
max=100,
default=50,
tooltip="Adjusts the darkest tones in the image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"brightness",
min=0,
max=100,
default=50,
tooltip="Overall brightness adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"contrast",
min=0,
max=100,
default=50,
tooltip="Contrast adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"saturation",
min=0,
max=100,
default=50,
tooltip="Color saturation adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"engine",
options=[
"automatic",
"balanced",
"cool",
"real",
"illusio",
"fairy",
"colorful_anime",
"hard_transform",
"softy",
],
tooltip="Processing engine selection.",
),
IO.Combo.Input(
"transfer_light_a",
options=["automatic", "low", "medium", "normal", "high", "high_on_faces"],
tooltip="The intensity of light transfer.",
),
IO.Combo.Input(
"transfer_light_b",
options=[
"automatic",
"composition",
"straight",
"smooth_in",
"smooth_out",
"smooth_both",
"reverse_both",
"soft_in",
"soft_out",
"soft_mid",
# "strong_mid", # Commented out because requests fail when this is set.
"style_shift",
"strong_shift",
],
tooltip="Also modifies light transfer intensity. "
"Can be combined with the previous control for varied effects.",
),
IO.Boolean.Input(
"fixed_generation",
default=True,
tooltip="Ensures consistent output with the same settings.",
),
],
),
],
tooltip="Fine-tuning options for advanced lighting control.",
),
IO.Image.Input(
"reference_image",
optional=True,
tooltip="Optional reference image to transfer lighting from.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.11}""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
prompt: str,
light_transfer_strength: int,
style: str,
interpolate_from_original: bool,
change_background: bool,
preserve_details: bool,
advanced_settings: InputAdvancedSettings,
reference_image: Input.Image | None = None,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
if reference_image is not None and get_number_of_images(reference_image) != 1:
raise ValueError("Exactly one reference image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
if reference_image is not None:
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(reference_image, min_height=160, min_width=160)
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0]
reference_url = None
if reference_image is not None:
reference_url = (await upload_images_to_comfyapi(cls, reference_image, max_images=1))[0]
adv_settings = None
if advanced_settings["advanced_settings"] == "enabled":
adv_settings = ImageRelightAdvancedSettingsRequest(
whites=advanced_settings["whites"],
blacks=advanced_settings["blacks"],
brightness=advanced_settings["brightness"],
contrast=advanced_settings["contrast"],
saturation=advanced_settings["saturation"],
engine=advanced_settings["engine"],
transfer_light_a=advanced_settings["transfer_light_a"],
transfer_light_b=advanced_settings["transfer_light_b"],
fixed_generation=advanced_settings["fixed_generation"],
)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-relight", method="POST"),
response_model=TaskResponse,
data=ImageRelightRequest(
image=image_url,
prompt=prompt if prompt else None,
transfer_light_from_reference_image=reference_url,
light_transfer_strength=light_transfer_strength,
interpolate_from_original=interpolate_from_original,
change_background=change_background,
style=style,
preserve_details=preserve_details,
advanced_settings=adv_settings,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-relight/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageSkinEnhancerNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageSkinEnhancerNode",
display_name="Magnific Image Skin Enhancer",
category="api node/image/Magnific",
description="Skin enhancement for portraits with multiple processing modes.",
inputs=[
IO.Image.Input("image", tooltip="The portrait image to enhance."),
IO.Int.Input(
"sharpen",
min=0,
max=100,
default=0,
tooltip="Sharpening intensity level.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"smart_grain",
min=0,
max=100,
default=2,
tooltip="Smart grain intensity level.",
display_mode=IO.NumberDisplay.slider,
),
IO.DynamicCombo.Input(
"mode",
options=[
IO.DynamicCombo.Option("creative", []),
IO.DynamicCombo.Option(
"faithful",
[
IO.Int.Input(
"skin_detail",
min=0,
max=100,
default=80,
tooltip="Skin detail enhancement level.",
display_mode=IO.NumberDisplay.slider,
),
],
),
IO.DynamicCombo.Option(
"flexible",
[
IO.Combo.Input(
"optimized_for",
options=[
"enhance_skin",
"improve_lighting",
"enhance_everything",
"transform_to_real",
"no_make_up",
],
tooltip="Enhancement optimization target.",
),
],
),
],
tooltip="Processing mode: creative for artistic enhancement, "
"faithful for preserving original appearance, "
"flexible for targeted optimization.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["mode"]),
expr="""
(
$rates := {"creative": 0.29, "faithful": 0.37, "flexible": 0.45};
{"type":"usd","usd": $lookup($rates, widgets.mode)}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
sharpen: int,
smart_grain: int,
mode: InputSkinEnhancerMode,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=4096 * 4096))[0]
selected_mode = mode["mode"]
if selected_mode == "creative":
endpoint = "creative"
data = ImageSkinEnhancerCreativeRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
)
elif selected_mode == "faithful":
endpoint = "faithful"
data = ImageSkinEnhancerFaithfulRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
skin_detail=mode["skin_detail"],
)
else: # flexible
endpoint = "flexible"
data = ImageSkinEnhancerFlexibleRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
optimized_for=mode["optimized_for"],
)
initial_res = await sync_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{endpoint}", method="POST"),
response_model=TaskResponse,
data=data,
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [
# MagnificImageUpscalerCreativeNode,
# MagnificImageUpscalerPreciseV2Node,
MagnificImageStyleTransferNode,
MagnificImageRelightNode,
MagnificImageSkinEnhancerNode,
]
async def comfy_entrypoint() -> MagnificExtension:
return MagnificExtension()

View File

@@ -56,15 +56,14 @@ def image_tensor_pair_to_batch(image1: torch.Tensor, image2: torch.Tensor) -> to
def tensor_to_bytesio(
image: torch.Tensor,
*,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
mime_type: str = "image/png",
) -> BytesIO:
"""Converts a torch.Tensor image to a named BytesIO object.
Args:
image: Input torch.Tensor image.
name: Optional filename for the BytesIO object.
total_pixels: Maximum total pixels for potential downscaling.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').
Returns:
@@ -79,13 +78,14 @@ def tensor_to_bytesio(
return img_binary
def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image:
def tensor_to_pil(image: torch.Tensor, total_pixels: int | None = 2048 * 2048) -> Image.Image:
"""Converts a single torch.Tensor image [H, W, C] to a PIL Image, optionally downscaling."""
if len(image.shape) > 3:
image = image[0]
# TODO: remove alpha if not allowed and present
input_tensor = image.cpu()
input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
if total_pixels is not None:
input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
image_np = (input_tensor.numpy() * 255).astype(np.uint8)
img = Image.fromarray(image_np)
return img
@@ -93,14 +93,14 @@ def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image
def tensor_to_base64_string(
image_tensor: torch.Tensor,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
mime_type: str = "image/png",
) -> str:
"""Convert [B, H, W, C] or [H, W, C] tensor to a base64 string.
Args:
image_tensor: Input torch.Tensor image.
total_pixels: Maximum total pixels for potential downscaling.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').
Returns:
@@ -161,14 +161,14 @@ def downscale_image_tensor_by_max_side(image: torch.Tensor, *, max_side: int) -
def tensor_to_data_uri(
image_tensor: torch.Tensor,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
mime_type: str = "image/png",
) -> str:
"""Converts a tensor image to a Data URI string.
Args:
image_tensor: Input torch.Tensor image.
total_pixels: Maximum total pixels for potential downscaling.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp').
Returns:
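For instance (illustrative call, names as in this diff), passing None now skips the downscale step entirely:

uri = tensor_to_data_uri(image_tensor, total_pixels=None, mime_type="image/png")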

View File

@@ -49,7 +49,7 @@ async def upload_images_to_comfyapi(
mime_type: str | None = None,
wait_label: str | None = "Uploading",
show_batch_index: bool = True,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
) -> list[str]:
"""
Uploads images to ComfyUI API and returns download URLs.

View File

@@ -701,7 +701,14 @@ class Noise_EmptyNoise:
def generate_noise(self, input_latent):
latent_image = input_latent["samples"]
return torch.zeros(latent_image.shape, dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
if latent_image.is_nested:
tensors = latent_image.unbind()
zeros = []
for t in tensors:
zeros.append(torch.zeros(t.shape, dtype=t.dtype, layout=t.layout, device="cpu"))
return comfy.nested_tensor.NestedTensor(zeros)
else:
return torch.zeros(latent_image.shape, dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
class Noise_RandomNoise:

View File

@@ -223,11 +223,24 @@ class LTXVAddGuide(io.ComfyNode):
return frame_idx, latent_idx
@classmethod
def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors):
def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors, latent_downscale_factor=1):
keyframe_idxs, _ = get_keyframe_idxs(cond)
_, latent_coords = cls.PATCHIFIER.patchify(guiding_latent)
pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0) # we need the causal fix only if we're placing the new latents at index 0
pixel_coords[:, 0] += frame_idx
# The following adjusts keyframe end positions for small grid IC-LoRA.
# After dilation, the small grid has the same size and position as the large grid,
# but each token encodes a larger image patch. We adjust the end position (not start)
# so that RoPE represents the correct middle point of each token.
# keyframe_idxs dims: (batch, spatial_dim [t,h,w], token_id, [start, end])
# We only adjust h,w (not t) in dim 1, and only end (not start) in dim 3.
spatial_end_offset = (latent_downscale_factor - 1) * torch.tensor(
scale_factors[1:],
device=pixel_coords.device,
).view(1, -1, 1, 1)
pixel_coords[:, 1:, :, 1:] += spatial_end_offset.to(pixel_coords.dtype)
if keyframe_idxs is None:
keyframe_idxs = pixel_coords
else:
@@ -235,12 +248,12 @@ class LTXVAddGuide(io.ComfyNode):
return node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs})
@classmethod
def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128):
def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128, latent_downscale_factor=1):
if latent_image.shape[1] != in_channels or guiding_latent.shape[1] != in_channels:
raise ValueError("Adding guide to a combined AV latent is not supported.")
positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors)
negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors)
positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)
negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)
if guide_mask is not None:
target_h = max(noise_mask.shape[3], guide_mask.shape[3])
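To make the end-position offset above concrete (illustrative numbers; pixel-to-latent scale_factors of (8, 32, 32) are an assumption, not stated in this diff): with latent_downscale_factor=2, spatial_end_offset = (2 - 1) * [32, 32] = [32, 32], so each token's h/w end coordinate moves 32 pixels while its start stays put, shifting the RoPE midpoint by 16 pixels, i.e. half the extra patch size covered by the dilated small grid.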

View File

@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.10.0"
__version__ = "0.11.0"

View File

@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.10.0"
version = "0.11.0"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

View File

@@ -1,5 +1,5 @@
comfyui-frontend-package==1.37.11
comfyui-workflow-templates==0.8.15
comfyui-workflow-templates==0.8.24
comfyui-embedded-docs==0.4.0
torch
torchsde
@@ -22,6 +22,7 @@ alembic
SQLAlchemy
av>=14.2.0
comfy-kitchen>=0.2.7
requests
#non essential dependencies:
kornia>=0.7.1

View File

@@ -679,6 +679,8 @@ class PromptServer():
info['deprecated'] = True
if getattr(obj_class, "EXPERIMENTAL", False):
info['experimental'] = True
if getattr(obj_class, "DEV_ONLY", False):
info['dev_only'] = True
if hasattr(obj_class, 'API_NODE'):
info['api_node'] = obj_class.API_NODE
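The net effect on the /object_info payload is one more optional key, set only when the flag is true, matching the deprecated/experimental handling above (illustrative fragment; node name hypothetical):

"MyDebugNode": { ..., "dev_only": true }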