Compare commits


1 commit

Author: Jukka Seppänen
SHA1: a1bc7fc000
Message: Merge a95cbd2d7f into 7ee77ff038
Date: 2026-01-25 23:36:12 -05:00
10 changed files with 125 additions and 1051 deletions

View File

@@ -466,7 +466,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
return embed_out
class SDTokenizer:
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, start_token=None, min_padding=None, pad_left=False, disable_weights=False, tokenizer_data={}, tokenizer_args={}):
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, pad_left=False, disable_weights=False, tokenizer_data={}, tokenizer_args={}):
if tokenizer_path is None:
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
@@ -479,15 +479,8 @@ class SDTokenizer:
empty = self.tokenizer('')["input_ids"]
self.tokenizer_adds_end_token = has_end_token
if has_start_token:
if len(empty) > 0:
self.tokens_start = 1
self.start_token = empty[0]
else:
self.tokens_start = 0
self.start_token = start_token
if start_token is None:
logging.warning("WARNING: There's something wrong with your tokenizers.'")
self.tokens_start = 1
self.start_token = empty[0]
if end_token is not None:
self.end_token = end_token
else:
@@ -495,7 +488,7 @@
self.end_token = empty[1]
else:
self.tokens_start = 0
self.start_token = start_token
self.start_token = None
if end_token is not None:
self.end_token = end_token
else:
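
The simplified branch above assumes the wrapped tokenizer always emits its special tokens when encoding an empty string, so empty[0] is the start token and empty[1] the end token. A minimal sketch of that assumption using the Hugging Face CLIPTokenizer (model download required; the ids shown are CLIP's standard BOS/EOS):

```python
from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
empty = tok("")["input_ids"]
print(empty)             # [49406, 49407] -> [<start>, <end>]
start_token = empty[0]   # what the simplified SDTokenizer branch now uses
end_token = empty[1]     # used when has_end_token is set and no override is given
```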

View File

@@ -118,7 +118,7 @@ class MistralTokenizerClass:
class Mistral3Tokenizer(sd1_clip.SDTokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}):
self.tekken_data = tokenizer_data.get("tekken_model", None)
super().__init__("", pad_with_end=False, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_class=MistralTokenizerClass, has_end_token=False, pad_to_max_length=False, pad_token=11, start_token=1, max_length=99999999, min_length=1, pad_left=True, tokenizer_args=load_mistral_tokenizer(self.tekken_data), tokenizer_data=tokenizer_data)
super().__init__("", pad_with_end=False, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_class=MistralTokenizerClass, has_end_token=False, pad_to_max_length=False, pad_token=11, max_length=99999999, min_length=1, pad_left=True, tokenizer_args=load_mistral_tokenizer(self.tekken_data), tokenizer_data=tokenizer_data)
def state_dict(self):
return {"tekken_model": self.tekken_data}
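
Dropping start_token=1 here is safe only because the wrapped tekken tokenizer prepends BOS (id 1) even to an empty string, which lets the simplified SDTokenizer base class derive the start token on its own. A stand-in sketch of that contract (MiniTok is hypothetical; the real check runs against the loaded tekken model):

```python
class MiniTok:
    # Stand-in for the tekken tokenizer: always prepends BOS, assumed id 1
    # (consistent with the start_token=1 removed above).
    def __call__(self, text):
        return {"input_ids": [1] + [7] * len(text.split())}

empty = MiniTok()("")["input_ids"]
assert empty[0] == 1  # SDTokenizer now reads start_token from here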

View File

@@ -13,6 +13,17 @@ class Text2ImageTaskCreationRequest(BaseModel):
watermark: bool | None = Field(False)
class Image2ImageTaskCreationRequest(BaseModel):
model: str = Field(...)
prompt: str = Field(...)
response_format: str | None = Field("url")
image: str = Field(..., description="Base64 encoded string or image URL")
size: str | None = Field("adaptive")
seed: int | None = Field(..., ge=0, le=2147483647)
guidance_scale: float | None = Field(..., ge=1.0, le=10.0)
watermark: bool | None = Field(False)
class Seedream4Options(BaseModel):
max_images: int = Field(15)
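
A hedged sketch of how the new Image2ImageTaskCreationRequest would be populated and serialized into the JSON body for the edit endpoint; the field values are illustrative, and pydantic v2's model_dump_json is assumed:

```python
from comfy_api_nodes.apis.bytedance import Image2ImageTaskCreationRequest

payload = Image2ImageTaskCreationRequest(
    model="seededit-3-0-i2i-250628",
    prompt="make the sky overcast",
    image="https://example.com/input.png",  # base64 string or image URL
    seed=42,                                # required, 0..2147483647
    guidance_scale=5.5,                     # required, 1.0..10.0
)
print(payload.model_dump_json(exclude_none=True))
```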

View File

@@ -1,122 +0,0 @@
from typing import TypedDict
from pydantic import AliasChoices, BaseModel, Field, model_validator
class InputPortraitMode(TypedDict):
portrait_mode: str
portrait_style: str
portrait_beautifier: str
class InputAdvancedSettings(TypedDict):
advanced_settings: str
whites: int
blacks: int
brightness: int
contrast: int
saturation: int
engine: str
transfer_light_a: str
transfer_light_b: str
fixed_generation: bool
class InputSkinEnhancerMode(TypedDict):
mode: str
skin_detail: int
optimized_for: str
class ImageUpscalerCreativeRequest(BaseModel):
image: str = Field(...)
scale_factor: str = Field(...)
optimized_for: str = Field(...)
prompt: str | None = Field(None)
creativity: int = Field(...)
hdr: int = Field(...)
resemblance: int = Field(...)
fractality: int = Field(...)
engine: str = Field(...)
class ImageUpscalerPrecisionV2Request(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
ultra_detail: int = Field(...)
flavor: str = Field(...)
scale_factor: int = Field(...)
class ImageRelightAdvancedSettingsRequest(BaseModel):
whites: int = Field(...)
blacks: int = Field(...)
brightness: int = Field(...)
contrast: int = Field(...)
saturation: int = Field(...)
engine: str = Field(...)
transfer_light_a: str = Field(...)
transfer_light_b: str = Field(...)
fixed_generation: bool = Field(...)
class ImageRelightRequest(BaseModel):
image: str = Field(...)
prompt: str | None = Field(None)
transfer_light_from_reference_image: str | None = Field(None)
light_transfer_strength: int = Field(...)
interpolate_from_original: bool = Field(...)
change_background: bool = Field(...)
style: str = Field(...)
preserve_details: bool = Field(...)
advanced_settings: ImageRelightAdvancedSettingsRequest | None = Field(...)
class ImageStyleTransferRequest(BaseModel):
image: str = Field(...)
reference_image: str = Field(...)
prompt: str | None = Field(None)
style_strength: int = Field(...)
structure_strength: int = Field(...)
is_portrait: bool = Field(...)
portrait_style: str | None = Field(...)
portrait_beautifier: str | None = Field(...)
flavor: str = Field(...)
engine: str = Field(...)
fixed_generation: bool = Field(...)
class ImageSkinEnhancerCreativeRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
class ImageSkinEnhancerFaithfulRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
skin_detail: int = Field(...)
class ImageSkinEnhancerFlexibleRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
optimized_for: str = Field(...)
class TaskResponse(BaseModel):
"""Unified response model that handles both wrapped and unwrapped API responses."""
task_id: str = Field(...)
status: str = Field(validation_alias=AliasChoices("status", "task_status"))
generated: list[str] | None = Field(None)
@model_validator(mode="before")
@classmethod
def unwrap_data(cls, values: dict) -> dict:
if "data" in values and isinstance(values["data"], dict):
return values["data"]
return values
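
For reference, the removed unwrap_data validator is what let TaskResponse accept both the wrapped and the flat response shapes the Freepik proxy returns. A sketch of the two payloads it normalized (pydantic v2 model_validate assumed; values illustrative):

```python
wrapped = {"data": {"task_id": "t1", "status": "IN_PROGRESS"}}
flat = {"task_id": "t1", "task_status": "IN_PROGRESS"}  # alias via AliasChoices

assert TaskResponse.model_validate(wrapped).status == "IN_PROGRESS"
assert TaskResponse.model_validate(flat).status == "IN_PROGRESS"
```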

View File

@@ -9,6 +9,7 @@ from comfy_api_nodes.apis.bytedance import (
RECOMMENDED_PRESETS,
RECOMMENDED_PRESETS_SEEDREAM_4,
VIDEO_TASKS_EXECUTION_TIME,
Image2ImageTaskCreationRequest,
Image2VideoTaskCreationRequest,
ImageTaskCreationResponse,
Seedream4Options,
@@ -173,6 +174,99 @@ class ByteDanceImageNode(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
class ByteDanceImageEditNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="ByteDanceImageEditNode",
display_name="ByteDance Image Edit",
category="api node/image/ByteDance",
description="Edit images using ByteDance models via api based on prompt",
inputs=[
IO.Combo.Input("model", options=["seededit-3-0-i2i-250628"]),
IO.Image.Input(
"image",
tooltip="The base image to edit",
),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Instruction to edit image",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation",
optional=True,
),
IO.Float.Input(
"guidance_scale",
default=5.5,
min=1.0,
max=10.0,
step=0.01,
display_mode=IO.NumberDisplay.number,
tooltip="Higher value makes the image follow the prompt more closely",
optional=True,
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip='Whether to add an "AI generated" watermark to the image',
optional=True,
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
is_deprecated=True,
)
@classmethod
async def execute(
cls,
model: str,
image: Input.Image,
prompt: str,
seed: int,
guidance_scale: float,
watermark: bool,
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=True, min_length=1)
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1))
source_url = (await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0]
payload = Image2ImageTaskCreationRequest(
model=model,
prompt=prompt,
image=source_url,
seed=seed,
guidance_scale=guidance_scale,
watermark=watermark,
)
response = await sync_op(
cls,
ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"),
data=payload,
response_model=ImageTaskCreationResponse,
)
return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
class ByteDanceSeedreamNode(IO.ComfyNode):
@classmethod
@@ -1007,6 +1101,7 @@ class ByteDanceExtension(ComfyExtension):
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [
ByteDanceImageNode,
ByteDanceImageEditNode,
ByteDanceSeedreamNode,
ByteDanceTextToVideoNode,
ByteDanceImageToVideoNode,
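
The new edit node rejects batches and enforces an aspect ratio between 1:3 and 3:1 before uploading. A pure-Python sketch of that bound, which mirrors rather than calls the validate_image_aspect_ratio helper (whether the endpoints are inclusive is an assumption here):

```python
def aspect_ok(width: int, height: int) -> bool:
    # The (1, 3)..(3, 1) bound: width/height must lie in [1/3, 3].
    return 1 / 3 <= width / height <= 3

assert aspect_ok(1024, 1024)      # square passes
assert aspect_ok(3000, 1000)      # exactly 3:1 passes
assert not aspect_ok(4000, 1000)  # 4:1 is rejected
```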

View File

@@ -1,889 +0,0 @@
import math
from typing_extensions import override
from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.magnific import (
ImageRelightAdvancedSettingsRequest,
ImageRelightRequest,
ImageSkinEnhancerCreativeRequest,
ImageSkinEnhancerFaithfulRequest,
ImageSkinEnhancerFlexibleRequest,
ImageStyleTransferRequest,
ImageUpscalerCreativeRequest,
ImageUpscalerPrecisionV2Request,
InputAdvancedSettings,
InputPortraitMode,
InputSkinEnhancerMode,
TaskResponse,
)
from comfy_api_nodes.util import (
ApiEndpoint,
download_url_to_image_tensor,
downscale_image_tensor,
get_image_dimensions,
get_number_of_images,
poll_op,
sync_op,
upload_images_to_comfyapi,
validate_image_aspect_ratio,
validate_image_dimensions,
)
class MagnificImageUpscalerCreativeNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageUpscalerCreativeNode",
display_name="Magnific Image Upscale (Creative)",
category="api node/image/Magnific",
description="Promptguided enhancement, stylization, and 2x/4x/8x/16x upscaling. "
"Maximum output: 25.3 megapixels.",
inputs=[
IO.Image.Input("image"),
IO.String.Input("prompt", multiline=True, default=""),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
IO.Combo.Input(
"optimized_for",
options=[
"standard",
"soft_portraits",
"hard_portraits",
"art_n_illustration",
"videogame_assets",
"nature_n_landscapes",
"films_n_photography",
"3d_renders",
"science_fiction_n_horror",
],
),
IO.Int.Input("creativity", min=-10, max=10, default=0, display_mode=IO.NumberDisplay.slider),
IO.Int.Input(
"hdr",
min=-10,
max=10,
default=0,
tooltip="The level of definition and detail.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"resemblance",
min=-10,
max=10,
default=0,
tooltip="The level of resemblance to the original image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"fractality",
min=-10,
max=10,
default=0,
tooltip="The strength of the prompt and intricacy per square pixel.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"engine",
options=["automatic", "magnific_illusio", "magnific_sharpy", "magnific_sparkle"],
),
IO.Boolean.Input(
"auto_downscale",
default=False,
tooltip="Automatically downscale input image if output would exceed maximum pixel limit.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
expr="""
(
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
prompt: str,
scale_factor: str,
optimized_for: str,
creativity: int,
hdr: int,
resemblance: int,
fractality: int,
engine: str,
auto_downscale: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
max_output_pixels = 25_300_000
height, width = get_image_dimensions(image)
requested_scale = int(scale_factor.rstrip("x"))
output_pixels = height * width * requested_scale * requested_scale
if output_pixels > max_output_pixels:
if auto_downscale:
# Find optimal scale factor that doesn't require >2x downscale.
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
input_pixels = width * height
scale = 2
max_input_pixels = max_output_pixels // 4
for candidate in [16, 8, 4, 2]:
if candidate > requested_scale:
continue
scale_output_pixels = input_pixels * candidate * candidate
if scale_output_pixels <= max_output_pixels:
scale = candidate
max_input_pixels = None
break
downscale_ratio = math.sqrt(scale_output_pixels / max_output_pixels)
if downscale_ratio <= 2.0:
scale = candidate
max_input_pixels = max_output_pixels // (candidate * candidate)
break
if max_input_pixels is not None:
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
scale_factor = f"{scale}x"
else:
raise ValueError(
f"Output size ({width * requested_scale}x{height * requested_scale} = {output_pixels:,} pixels) "
f"exceeds maximum allowed size of {max_output_pixels:,} pixels. "
f"Use a smaller input image or lower scale factor."
)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler", method="POST"),
response_model=TaskResponse,
data=ImageUpscalerCreativeRequest(
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
scale_factor=scale_factor,
optimized_for=optimized_for,
creativity=creativity,
hdr=hdr,
resemblance=resemblance,
fractality=fractality,
engine=engine,
prompt=prompt if prompt else None,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageUpscalerPreciseV2Node",
display_name="Magnific Image Upscale (Precise V2)",
category="api node/image/Magnific",
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
"Maximum output: 10060×10060 pixels.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
IO.Combo.Input(
"flavor",
options=["sublime", "photo", "photo_denoiser"],
tooltip="Processing style: "
"sublime for general use, photo for photographs, photo_denoiser for noisy photos.",
),
IO.Int.Input(
"sharpen",
min=0,
max=100,
default=7,
tooltip="Image sharpness intensity. Higher values increase edge definition and clarity.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"smart_grain",
min=0,
max=100,
default=7,
tooltip="Intelligent grain/texture enhancement to prevent the image from "
"looking too smooth or artificial.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"ultra_detail",
min=0,
max=100,
default=30,
tooltip="Controls fine detail, textures, and micro-details added during upscaling.",
display_mode=IO.NumberDisplay.slider,
),
IO.Boolean.Input(
"auto_downscale",
default=False,
tooltip="Automatically downscale input image if output would exceed maximum resolution.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
expr="""
(
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
scale_factor: str,
flavor: str,
sharpen: int,
smart_grain: int,
ultra_detail: int,
auto_downscale: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
max_output_dimension = 10060
height, width = get_image_dimensions(image)
requested_scale = int(scale_factor.strip("x"))
output_width = width * requested_scale
output_height = height * requested_scale
if output_width > max_output_dimension or output_height > max_output_dimension:
if auto_downscale:
# Find optimal scale factor that doesn't require >2x downscale.
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
max_dim = max(width, height)
scale = 2
max_input_dim = max_output_dimension // 2
scale_ratio = max_input_dim / max_dim
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
for candidate in [16, 8, 4, 2]:
if candidate > requested_scale:
continue
output_dim = max_dim * candidate
if output_dim <= max_output_dimension:
scale = candidate
max_input_pixels = None
break
downscale_ratio = output_dim / max_output_dimension
if downscale_ratio <= 2.0:
scale = candidate
max_input_dim = max_output_dimension // candidate
scale_ratio = max_input_dim / max_dim
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
break
if max_input_pixels is not None:
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
requested_scale = scale
else:
raise ValueError(
f"Output dimensions ({output_width}x{output_height}) exceed maximum allowed "
f"resolution of {max_output_dimension}x{max_output_dimension} pixels. "
f"Use a smaller input image or lower scale factor."
)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler-precision-v2", method="POST"),
response_model=TaskResponse,
data=ImageUpscalerPrecisionV2Request(
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
scale_factor=requested_scale,
flavor=flavor,
sharpen=sharpen,
smart_grain=smart_grain,
ultra_detail=ultra_detail,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler-precision-v2/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageStyleTransferNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageStyleTransferNode",
display_name="Magnific Image Style Transfer",
category="api node/image/Magnific",
description="Transfer the style from a reference image to your input image.",
inputs=[
IO.Image.Input("image", tooltip="The image to apply style transfer to."),
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
IO.String.Input("prompt", multiline=True, default=""),
IO.Int.Input(
"style_strength",
min=0,
max=100,
default=100,
tooltip="Percentage of style strength.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"structure_strength",
min=0,
max=100,
default=50,
tooltip="Maintains the structure of the original image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"flavor",
options=["faithful", "gen_z", "psychedelia", "detaily", "clear", "donotstyle", "donotstyle_sharp"],
tooltip="Style transfer flavor.",
),
IO.Combo.Input(
"engine",
options=[
"balanced",
"definio",
"illusio",
"3d_cartoon",
"colorful_anime",
"caricature",
"real",
"super_real",
"softy",
],
tooltip="Processing engine selection.",
),
IO.DynamicCombo.Input(
"portrait_mode",
options=[
IO.DynamicCombo.Option("disabled", []),
IO.DynamicCombo.Option(
"enabled",
[
IO.Combo.Input(
"portrait_style",
options=["standard", "pop", "super_pop"],
tooltip="Visual style applied to portrait images.",
),
IO.Combo.Input(
"portrait_beautifier",
options=["none", "beautify_face", "beautify_face_max"],
tooltip="Facial beautification intensity on portraits.",
),
],
),
],
tooltip="Enable portrait mode for facial enhancements.",
),
IO.Boolean.Input(
"fixed_generation",
default=True,
tooltip="When disabled, expect each generation to introduce a degree of randomness, "
"leading to more diverse outcomes.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.11}""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
reference_image: Input.Image,
prompt: str,
style_strength: int,
structure_strength: int,
flavor: str,
engine: str,
portrait_mode: InputPortraitMode,
fixed_generation: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
if get_number_of_images(reference_image) != 1:
raise ValueError("Exactly one reference image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
validate_image_dimensions(reference_image, min_height=160, min_width=160)
is_portrait = portrait_mode["portrait_mode"] == "enabled"
portrait_style = portrait_mode.get("portrait_style", "standard")
portrait_beautifier = portrait_mode.get("portrait_beautifier", "none")
uploaded_urls = await upload_images_to_comfyapi(cls, [image, reference_image], max_images=2)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-style-transfer", method="POST"),
response_model=TaskResponse,
data=ImageStyleTransferRequest(
image=uploaded_urls[0],
reference_image=uploaded_urls[1],
prompt=prompt if prompt else None,
style_strength=style_strength,
structure_strength=structure_strength,
is_portrait=is_portrait,
portrait_style=portrait_style if is_portrait else None,
portrait_beautifier=portrait_beautifier if is_portrait and portrait_beautifier != "none" else None,
flavor=flavor,
engine=engine,
fixed_generation=fixed_generation,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-style-transfer/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageRelightNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageRelightNode",
display_name="Magnific Image Relight",
category="api node/image/Magnific",
description="Relight an image with lighting adjustments and optional reference-based light transfer.",
inputs=[
IO.Image.Input("image", tooltip="The image to relight."),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Descriptive guidance for lighting. Supports emphasis notation (1-1.4).",
),
IO.Int.Input(
"light_transfer_strength",
min=0,
max=100,
default=100,
tooltip="Intensity of light transfer application.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"style",
options=[
"standard",
"darker_but_realistic",
"clean",
"smooth",
"brighter",
"contrasted_n_hdr",
"just_composition",
],
tooltip="Stylistic output preference.",
),
IO.Boolean.Input(
"interpolate_from_original",
default=False,
tooltip="Restricts generation freedom to match original more closely.",
),
IO.Boolean.Input(
"change_background",
default=True,
tooltip="Modifies background based on prompt/reference.",
),
IO.Boolean.Input(
"preserve_details",
default=True,
tooltip="Maintains texture and fine details from original.",
),
IO.DynamicCombo.Input(
"advanced_settings",
options=[
IO.DynamicCombo.Option("disabled", []),
IO.DynamicCombo.Option(
"enabled",
[
IO.Int.Input(
"whites",
min=0,
max=100,
default=50,
tooltip="Adjusts the brightest tones in the image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"blacks",
min=0,
max=100,
default=50,
tooltip="Adjusts the darkest tones in the image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"brightness",
min=0,
max=100,
default=50,
tooltip="Overall brightness adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"contrast",
min=0,
max=100,
default=50,
tooltip="Contrast adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"saturation",
min=0,
max=100,
default=50,
tooltip="Color saturation adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"engine",
options=[
"automatic",
"balanced",
"cool",
"real",
"illusio",
"fairy",
"colorful_anime",
"hard_transform",
"softy",
],
tooltip="Processing engine selection.",
),
IO.Combo.Input(
"transfer_light_a",
options=["automatic", "low", "medium", "normal", "high", "high_on_faces"],
tooltip="The intensity of light transfer.",
),
IO.Combo.Input(
"transfer_light_b",
options=[
"automatic",
"composition",
"straight",
"smooth_in",
"smooth_out",
"smooth_both",
"reverse_both",
"soft_in",
"soft_out",
"soft_mid",
# "strong_mid", # Commented out because requests fail when this is set.
"style_shift",
"strong_shift",
],
tooltip="Also modifies light transfer intensity. "
"Can be combined with the previous control for varied effects.",
),
IO.Boolean.Input(
"fixed_generation",
default=True,
tooltip="Ensures consistent output with the same settings.",
),
],
),
],
tooltip="Fine-tuning options for advanced lighting control.",
),
IO.Image.Input(
"reference_image",
optional=True,
tooltip="Optional reference image to transfer lighting from.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.11}""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
prompt: str,
light_transfer_strength: int,
style: str,
interpolate_from_original: bool,
change_background: bool,
preserve_details: bool,
advanced_settings: InputAdvancedSettings,
reference_image: Input.Image | None = None,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
if reference_image is not None and get_number_of_images(reference_image) != 1:
raise ValueError("Exactly one reference image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
if reference_image is not None:
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(reference_image, min_height=160, min_width=160)
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0]
reference_url = None
if reference_image is not None:
reference_url = (await upload_images_to_comfyapi(cls, reference_image, max_images=1))[0]
adv_settings = None
if advanced_settings["advanced_settings"] == "enabled":
adv_settings = ImageRelightAdvancedSettingsRequest(
whites=advanced_settings["whites"],
blacks=advanced_settings["blacks"],
brightness=advanced_settings["brightness"],
contrast=advanced_settings["contrast"],
saturation=advanced_settings["saturation"],
engine=advanced_settings["engine"],
transfer_light_a=advanced_settings["transfer_light_a"],
transfer_light_b=advanced_settings["transfer_light_b"],
fixed_generation=advanced_settings["fixed_generation"],
)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-relight", method="POST"),
response_model=TaskResponse,
data=ImageRelightRequest(
image=image_url,
prompt=prompt if prompt else None,
transfer_light_from_reference_image=reference_url,
light_transfer_strength=light_transfer_strength,
interpolate_from_original=interpolate_from_original,
change_background=change_background,
style=style,
preserve_details=preserve_details,
advanced_settings=adv_settings,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-relight/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageSkinEnhancerNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageSkinEnhancerNode",
display_name="Magnific Image Skin Enhancer",
category="api node/image/Magnific",
description="Skin enhancement for portraits with multiple processing modes.",
inputs=[
IO.Image.Input("image", tooltip="The portrait image to enhance."),
IO.Int.Input(
"sharpen",
min=0,
max=100,
default=0,
tooltip="Sharpening intensity level.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"smart_grain",
min=0,
max=100,
default=2,
tooltip="Smart grain intensity level.",
display_mode=IO.NumberDisplay.slider,
),
IO.DynamicCombo.Input(
"mode",
options=[
IO.DynamicCombo.Option("creative", []),
IO.DynamicCombo.Option(
"faithful",
[
IO.Int.Input(
"skin_detail",
min=0,
max=100,
default=80,
tooltip="Skin detail enhancement level.",
display_mode=IO.NumberDisplay.slider,
),
],
),
IO.DynamicCombo.Option(
"flexible",
[
IO.Combo.Input(
"optimized_for",
options=[
"enhance_skin",
"improve_lighting",
"enhance_everything",
"transform_to_real",
"no_make_up",
],
tooltip="Enhancement optimization target.",
),
],
),
],
tooltip="Processing mode: creative for artistic enhancement, "
"faithful for preserving original appearance, "
"flexible for targeted optimization.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["mode"]),
expr="""
(
$rates := {"creative": 0.29, "faithful": 0.37, "flexible": 0.45};
{"type":"usd","usd": $lookup($rates, widgets.mode)}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
sharpen: int,
smart_grain: int,
mode: InputSkinEnhancerMode,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=4096 * 4096))[0]
selected_mode = mode["mode"]
if selected_mode == "creative":
endpoint = "creative"
data = ImageSkinEnhancerCreativeRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
)
elif selected_mode == "faithful":
endpoint = "faithful"
data = ImageSkinEnhancerFaithfulRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
skin_detail=mode["skin_detail"],
)
else: # flexible
endpoint = "flexible"
data = ImageSkinEnhancerFlexibleRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
optimized_for=mode["optimized_for"],
)
initial_res = await sync_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{endpoint}", method="POST"),
response_model=TaskResponse,
data=data,
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [
# MagnificImageUpscalerCreativeNode,
# MagnificImageUpscalerPreciseV2Node,
MagnificImageStyleTransferNode,
MagnificImageRelightNode,
MagnificImageSkinEnhancerNode,
]
async def comfy_entrypoint() -> MagnificExtension:
return MagnificExtension()
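
The deleted upscaler nodes shared one auto-downscale strategy: pick the largest scale factor whose output fits the budget, accepting at most a 2x input downscale because the server upscales in 2x steps. A standalone sketch of the pixel-budget variant used by the creative upscaler, with a worked example (function name and inputs illustrative):

```python
import math

MAX_OUT = 25_300_000  # creative upscaler output budget from the deleted node

def plan(width: int, height: int, requested: int):
    """Return (scale, max_input_pixels), where None means no downscale needed."""
    in_px = width * height
    for candidate in (16, 8, 4, 2):
        if candidate > requested:
            continue
        out_px = in_px * candidate * candidate
        if out_px <= MAX_OUT:
            return candidate, None  # fits as-is
        if math.sqrt(out_px / MAX_OUT) <= 2.0:  # <=2x downscale is acceptable
            return candidate, MAX_OUT // (candidate * candidate)
    return 2, MAX_OUT // 4  # fallback mirrors the pre-loop defaults

# 2000x1500 at 16x: 16x and 8x would need >2x downscale; 4x needs only ~1.38x.
print(plan(2000, 1500, 16))  # (4, 1581250)
```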

View File

@@ -56,14 +56,15 @@ def image_tensor_pair_to_batch(image1: torch.Tensor, image2: torch.Tensor) -> to
def tensor_to_bytesio(
image: torch.Tensor,
*,
total_pixels: int | None = 2048 * 2048,
total_pixels: int = 2048 * 2048,
mime_type: str = "image/png",
) -> BytesIO:
"""Converts a torch.Tensor image to a named BytesIO object.
Args:
image: Input torch.Tensor image.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
name: Optional filename for the BytesIO object.
total_pixels: Maximum total pixels for potential downscaling.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').
Returns:
@@ -78,14 +79,13 @@ def tensor_to_bytesio(
return img_binary
def tensor_to_pil(image: torch.Tensor, total_pixels: int | None = 2048 * 2048) -> Image.Image:
def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image:
"""Converts a single torch.Tensor image [H, W, C] to a PIL Image, optionally downscaling."""
if len(image.shape) > 3:
image = image[0]
# TODO: remove alpha if not allowed and present
input_tensor = image.cpu()
if total_pixels is not None:
input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
image_np = (input_tensor.numpy() * 255).astype(np.uint8)
img = Image.fromarray(image_np)
return img
@@ -93,14 +93,14 @@ def tensor_to_pil(image: torch.Tensor, total_pixels: int | None = 2048 * 2048) -
def tensor_to_base64_string(
image_tensor: torch.Tensor,
total_pixels: int | None = 2048 * 2048,
total_pixels: int = 2048 * 2048,
mime_type: str = "image/png",
) -> str:
"""Convert [B, H, W, C] or [H, W, C] tensor to a base64 string.
Args:
image_tensor: Input torch.Tensor image.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
total_pixels: Maximum total pixels for potential downscaling.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').
Returns:
@@ -161,14 +161,14 @@ def downscale_image_tensor_by_max_side(image: torch.Tensor, *, max_side: int) -
def tensor_to_data_uri(
image_tensor: torch.Tensor,
total_pixels: int | None = 2048 * 2048,
total_pixels: int = 2048 * 2048,
mime_type: str = "image/png",
) -> str:
"""Converts a tensor image to a Data URI string.
Args:
image_tensor: Input torch.Tensor image.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
total_pixels: Maximum total pixels for potential downscaling.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp').
Returns:
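
With the None escape hatch removed, every conversion path now enforces the pixel budget unconditionally. A rough sketch of the behavior expected from downscale_image_tensor on a [B, H, W, C] tensor (only the budget logic is taken from the diff; the bilinear resampling choice is an assumption):

```python
import torch
import torch.nn.functional as F

def downscale_sketch(img: torch.Tensor, total_pixels: int) -> torch.Tensor:
    # Shrink only when over budget; never upscale.
    b, h, w, c = img.shape
    if h * w <= total_pixels:
        return img
    scale = (total_pixels / (h * w)) ** 0.5
    new_h, new_w = max(1, int(h * scale)), max(1, int(w * scale))
    x = img.permute(0, 3, 1, 2)  # NHWC -> NCHW for F.interpolate
    x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False)
    return x.permute(0, 2, 3, 1)

img = torch.rand(1, 3000, 3000, 3)
print(downscale_sketch(img, 2048 * 2048).shape)  # torch.Size([1, 2048, 2048, 3])
```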

View File

@@ -49,7 +49,7 @@ async def upload_images_to_comfyapi(
mime_type: str | None = None,
wait_label: str | None = "Uploading",
show_batch_index: bool = True,
total_pixels: int | None = 2048 * 2048,
total_pixels: int = 2048 * 2048,
) -> list[str]:
"""
Uploads images to ComfyUI API and returns download URLs.

View File

@@ -223,24 +223,11 @@ class LTXVAddGuide(io.ComfyNode):
return frame_idx, latent_idx
@classmethod
def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors, latent_downscale_factor=1):
def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors):
keyframe_idxs, _ = get_keyframe_idxs(cond)
_, latent_coords = cls.PATCHIFIER.patchify(guiding_latent)
pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0) # we need the causal fix only if we're placing the new latents at index 0
pixel_coords[:, 0] += frame_idx
# The following adjusts keyframe end positions for small grid IC-LoRA.
# After dilation, the small grid has the same size and position as the large grid,
# but each token encodes a larger image patch. We adjust the end position (not start)
# so that RoPE represents the correct middle point of each token.
# keyframe_idxs dims: (batch, spatial_dim [t,h,w], token_id, [start, end])
# We only adjust h,w (not t) in dim 1, and only end (not start) in dim 3.
spatial_end_offset = (latent_downscale_factor - 1) * torch.tensor(
scale_factors[1:],
device=pixel_coords.device,
).view(1, -1, 1, 1)
pixel_coords[:, 1:, :, 1:] += spatial_end_offset.to(pixel_coords.dtype)
if keyframe_idxs is None:
keyframe_idxs = pixel_coords
else:
@@ -248,12 +235,12 @@ class LTXVAddGuide(io.ComfyNode):
return node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs})
@classmethod
def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128, latent_downscale_factor=1):
def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128):
if latent_image.shape[1] != in_channels or guiding_latent.shape[1] != in_channels:
raise ValueError("Adding guide to a combined AV latent is not supported.")
positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)
negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)
positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors)
negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors)
if guide_mask is not None:
target_h = max(noise_mask.shape[3], guide_mask.shape[3])
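
For context on what this revert removes: with latent_downscale_factor=2 and pixel scale_factors of (8, 32, 32), the deleted adjustment shifted every token's h/w end coordinate by one extra patch (32 pixels) so that RoPE represented the correct midpoint of each token. A standalone sketch of that offset arithmetic, with shapes per the deleted comment (values illustrative):

```python
import torch

scale_factors = (8, 32, 32)  # pixels per latent step in (t, h, w), illustrative
latent_downscale_factor = 2
# (batch, spatial dim [t, h, w], token id, [start, end])
pixel_coords = torch.zeros(1, 3, 5, 2)

offset = (latent_downscale_factor - 1) * torch.tensor(scale_factors[1:]).view(1, -1, 1, 1)
pixel_coords[:, 1:, :, 1:] += offset.to(pixel_coords.dtype)  # only h/w, only end
print(pixel_coords[0, 1, 0], pixel_coords[0, 2, 0])  # tensor([ 0., 32.]) twice
```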

View File

@@ -22,7 +22,6 @@ alembic
SQLAlchemy
av>=14.2.0
comfy-kitchen>=0.2.7
requests
#non essential dependencies:
kornia>=0.7.1