Compare commits

...

13 Commits

Author SHA1 Message Date
envy-ai
cec656aaf7
Merge 27d11db345 into 09725967cf 2026-01-26 23:55:38 -05:00
comfyanonymous
09725967cf ComfyUI version v0.11.0
2026-01-26 23:08:01 -05:00
ComfyUI Wiki
5f62440fbb
chore: update workflow templates to v0.8.24 (#12103) 2026-01-26 22:47:33 -05:00
ComfyUI Wiki
ac91c340f4
Update workflow templates to v0.8.23 (#12102) 2026-01-26 21:39:39 -05:00
comfyanonymous
2db3b0ff90
Update amd portable for rocm 7.2 (#12101)
* Update amd portable for rocm 7.2

* Update Python patch version in release workflow
2026-01-26 19:49:31 -05:00
rattus
6516ab335d
wan-vae: Switch off feature cache for single frame (#12090)
The code throughout is None-safe and simply skips the feature-cache saving
step when the cache is None. Set it to None for single-frame use so Qwen
doesn't burn VRAM on the unused cache.
2026-01-26 19:40:19 -05:00
Jukka Seppänen
ad53e78f11
Fix Noise_EmptyNoise when using nested latents (#12089) 2026-01-26 19:25:00 -05:00
Alexander Piskun
29011ba87e
[API Nodes] add Magnific nodes (#11986)
* feat(api-nodes): add Magnific nodes

* aggressive downscaling should not be performed

* disable upscaler nodes

---------

Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>
2026-01-26 14:10:09 -08:00
Alexander Piskun
cd4985e2f3
chore(api-nodes): remove ByteDanceImageEditNode node (seededit) (#12069)
Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>
2026-01-26 13:58:33 -08:00
Tavi Halperin
bfe31d0b9d
IC-LoRA: support small grid (#12074) 2026-01-26 15:33:19 -05:00
comfyanonymous
2129e7d278
Fix mistral 3 tokenizer code failing on latest transformers version and other breakage. (#12095)
* Fix mistral 3 tokenizer code failing on latest transformers version.

* Add requests to the requirements
2026-01-26 11:39:00 -05:00
envy-ai
27d11db345 missed adding these in previous commit 2025-05-13 13:11:53 -04:00
envy-ai
90f23bac28 Use cached encoded empty strings rather than all zeros for better quality and prompt adherence with llama-only HiDreams 2025-04-21 01:11:22 -04:00
19 changed files with 1082 additions and 139 deletions

View File

@@ -20,7 +20,7 @@ jobs:
git_tag: ${{ inputs.git_tag }}
cache_tag: "cu130"
python_minor: "13"
python_patch: "9"
python_patch: "11"
rel_name: "nvidia"
rel_extra_name: ""
test_release: true
@@ -65,11 +65,11 @@ jobs:
contents: "write"
packages: "write"
pull-requests: "read"
name: "Release AMD ROCm 7.1.1"
name: "Release AMD ROCm 7.2"
uses: ./.github/workflows/stable-release.yml
with:
git_tag: ${{ inputs.git_tag }}
cache_tag: "rocm711"
cache_tag: "rocm72"
python_minor: "12"
python_patch: "10"
rel_name: "amd"

View File

@@ -479,10 +479,12 @@ class WanVAE(nn.Module):
def encode(self, x):
conv_idx = [0]
feat_map = [None] * count_conv3d(self.decoder)
## cache
t = x.shape[2]
iter_ = 1 + (t - 1) // 4
feat_map = None
if iter_ > 1:
feat_map = [None] * count_conv3d(self.decoder)
## Split the input x to encode along the time axis into chunks of 1, 4, 4, 4, ...
for i in range(iter_):
conv_idx = [0]
@@ -502,10 +504,11 @@ class WanVAE(nn.Module):
def decode(self, z):
conv_idx = [0]
feat_map = [None] * count_conv3d(self.decoder)
# z: [b,c,t,h,w]
iter_ = z.shape[2]
feat_map = None
if iter_ > 1:
feat_map = [None] * count_conv3d(self.decoder)
x = self.conv2(z)
for i in range(iter_):
conv_idx = [0]
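
The hunk above only allocates the decoder feature cache when more than one temporal chunk will be processed, so single-frame (image) encodes skip it entirely. A minimal standalone sketch of that pattern (not the actual WanVAE code; the conv count of 32 is an assumed value):

```python
def make_feature_cache(num_chunks: int, num_cached_convs: int):
    # Only carry per-conv cached features when there is more than one chunk;
    # a single-frame encode/decode never reads the cache, so skip allocating it.
    if num_chunks <= 1:
        return None
    return [None] * num_cached_convs

t = 1                      # a single frame, e.g. Qwen image latents
iter_ = 1 + (t - 1) // 4   # temporal chunk count, mirroring encode() above
feat_map = make_feature_cache(iter_, num_cached_convs=32)  # 32 is an assumed count
assert feat_map is None    # no cache is built for the single-frame case
```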

View File

@@ -466,7 +466,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No
return embed_out
class SDTokenizer:
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, min_padding=None, pad_left=False, disable_weights=False, tokenizer_data={}, tokenizer_args={}):
def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, start_token=None, min_padding=None, pad_left=False, disable_weights=False, tokenizer_data={}, tokenizer_args={}):
if tokenizer_path is None:
tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
@@ -479,8 +479,15 @@ class SDTokenizer:
empty = self.tokenizer('')["input_ids"]
self.tokenizer_adds_end_token = has_end_token
if has_start_token:
self.tokens_start = 1
self.start_token = empty[0]
if len(empty) > 0:
self.tokens_start = 1
self.start_token = empty[0]
else:
self.tokens_start = 0
self.start_token = start_token
if start_token is None:
logging.warning("WARNING: There's something wrong with your tokenizers.")
if end_token is not None:
self.end_token = end_token
else:
@@ -488,7 +495,7 @@ class SDTokenizer:
self.end_token = empty[1]
else:
self.tokens_start = 0
self.start_token = None
self.start_token = start_token
if end_token is not None:
self.end_token = end_token
else:
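
The hunk above guards against tokenizers whose empty-string encoding contains no ids; in that case the start token has to come from the new start_token argument instead of the empty encode. A standalone sketch of that fallback (resolve_start_token is a hypothetical helper, not part of SDTokenizer):

```python
import logging

def resolve_start_token(empty_ids: list[int], start_token: int | None) -> tuple[int, int | None]:
    """Return (tokens_start, start_token) the way the diff above decides them."""
    if len(empty_ids) > 0:
        return 1, empty_ids[0]   # tokenizer emits a BOS; take it from the empty encode
    if start_token is None:
        logging.warning("WARNING: There's something wrong with your tokenizers.")
    return 0, start_token        # no BOS emitted; use the explicitly supplied one

print(resolve_start_token([49406, 49407], None))  # (1, 49406) -> CLIP-style tokenizer
print(resolve_start_token([], 1))                 # (0, 1)     -> Mistral 3 case (start_token=1)
```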

View File

@@ -118,7 +118,7 @@ class MistralTokenizerClass:
class Mistral3Tokenizer(sd1_clip.SDTokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}):
self.tekken_data = tokenizer_data.get("tekken_model", None)
super().__init__("", pad_with_end=False, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_class=MistralTokenizerClass, has_end_token=False, pad_to_max_length=False, pad_token=11, max_length=99999999, min_length=1, pad_left=True, tokenizer_args=load_mistral_tokenizer(self.tekken_data), tokenizer_data=tokenizer_data)
super().__init__("", pad_with_end=False, embedding_size=5120, embedding_key='mistral3_24b', tokenizer_class=MistralTokenizerClass, has_end_token=False, pad_to_max_length=False, pad_token=11, start_token=1, max_length=99999999, min_length=1, pad_left=True, tokenizer_args=load_mistral_tokenizer(self.tekken_data), tokenizer_data=tokenizer_data)
def state_dict(self):
return {"tekken_model": self.tekken_data}

View File

@@ -5,6 +5,7 @@ from comfy import sdxl_clip
import comfy.model_management
import torch
import logging
import folder_paths
class HiDreamTokenizer:
@@ -91,6 +92,8 @@ class HiDreamTEModel(torch.nn.Module):
token_weight_pairs_llama = token_weight_pairs["llama"]
lg_out = None
pooled = None
t5_out = None
ll_out = None
extra = {}
if len(token_weight_pairs_g) > 0 or len(token_weight_pairs_l) > 0:
@@ -104,8 +107,9 @@ class HiDreamTEModel(torch.nn.Module):
else:
g_pooled = torch.zeros((1, 1280), device=comfy.model_management.intermediate_device())
pooled = torch.cat((l_pooled, g_pooled), dim=-1)
if self.clip_g is not None and self.clip_l is not None:
pooled = torch.cat((l_pooled, g_pooled), dim=-1)
if self.t5xxl is not None:
t5_output = self.t5xxl.encode_token_weights(token_weight_pairs_t5)
t5_out, t5_pooled = t5_output[:2]
@@ -120,13 +124,15 @@ class HiDreamTEModel(torch.nn.Module):
ll_out = None
if t5_out is None:
t5_out = torch.zeros((1, 128, 4096), device=comfy.model_management.intermediate_device())
t5_path = folder_paths.get_full_path_or_raise("hidream_empty_latents", "t5_blank.pt")
t5_out = torch.load(t5_path, map_location=comfy.model_management.intermediate_device())
if ll_out is None:
ll_out = torch.zeros((1, 32, 1, 4096), device=comfy.model_management.intermediate_device())
if pooled is None:
pooled = torch.zeros((1, 768 + 1280), device=comfy.model_management.intermediate_device())
pooled_path = folder_paths.get_full_path_or_raise("hidream_empty_latents", "pooled_blank.pt")
pooled = torch.load(pooled_path, map_location=comfy.model_management.intermediate_device())
extra["conditioning_llama3"] = ll_out
return t5_out, pooled, extra
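
The HiDream change above swaps the all-zeros placeholders for precomputed blank-prompt embeddings loaded from the new hidream_empty_latents model folder (the two binary .pt files in this compare). A hedged sketch of the idea; the zeros fallback is an illustration only, the real code raises if the file is missing:

```python
import os
import torch

def load_blank_embedding(path: str, fallback_shape: tuple[int, ...], device: str = "cpu") -> torch.Tensor:
    # Prefer the cached "empty prompt" encoding; fall back to zeros for illustration.
    if os.path.exists(path):
        return torch.load(path, map_location=device)
    return torch.zeros(fallback_shape, device=device)

t5_out = load_blank_embedding("models/hidream_empty_latents/t5_blank.pt", (1, 128, 4096))
pooled = load_blank_embedding("models/hidream_empty_latents/pooled_blank.pt", (1, 768 + 1280))
print(t5_out.shape, pooled.shape)
```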

View File

@@ -13,17 +13,6 @@ class Text2ImageTaskCreationRequest(BaseModel):
watermark: bool | None = Field(False)
class Image2ImageTaskCreationRequest(BaseModel):
model: str = Field(...)
prompt: str = Field(...)
response_format: str | None = Field("url")
image: str = Field(..., description="Base64 encoded string or image URL")
size: str | None = Field("adaptive")
seed: int | None = Field(..., ge=0, le=2147483647)
guidance_scale: float | None = Field(..., ge=1.0, le=10.0)
watermark: bool | None = Field(False)
class Seedream4Options(BaseModel):
max_images: int = Field(15)

View File

@@ -0,0 +1,122 @@
from typing import TypedDict
from pydantic import AliasChoices, BaseModel, Field, model_validator
class InputPortraitMode(TypedDict):
portrait_mode: str
portrait_style: str
portrait_beautifier: str
class InputAdvancedSettings(TypedDict):
advanced_settings: str
whites: int
blacks: int
brightness: int
contrast: int
saturation: int
engine: str
transfer_light_a: str
transfer_light_b: str
fixed_generation: bool
class InputSkinEnhancerMode(TypedDict):
mode: str
skin_detail: int
optimized_for: str
class ImageUpscalerCreativeRequest(BaseModel):
image: str = Field(...)
scale_factor: str = Field(...)
optimized_for: str = Field(...)
prompt: str | None = Field(None)
creativity: int = Field(...)
hdr: int = Field(...)
resemblance: int = Field(...)
fractality: int = Field(...)
engine: str = Field(...)
class ImageUpscalerPrecisionV2Request(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
ultra_detail: int = Field(...)
flavor: str = Field(...)
scale_factor: int = Field(...)
class ImageRelightAdvancedSettingsRequest(BaseModel):
whites: int = Field(...)
blacks: int = Field(...)
brightness: int = Field(...)
contrast: int = Field(...)
saturation: int = Field(...)
engine: str = Field(...)
transfer_light_a: str = Field(...)
transfer_light_b: str = Field(...)
fixed_generation: bool = Field(...)
class ImageRelightRequest(BaseModel):
image: str = Field(...)
prompt: str | None = Field(None)
transfer_light_from_reference_image: str | None = Field(None)
light_transfer_strength: int = Field(...)
interpolate_from_original: bool = Field(...)
change_background: bool = Field(...)
style: str = Field(...)
preserve_details: bool = Field(...)
advanced_settings: ImageRelightAdvancedSettingsRequest | None = Field(...)
class ImageStyleTransferRequest(BaseModel):
image: str = Field(...)
reference_image: str = Field(...)
prompt: str | None = Field(None)
style_strength: int = Field(...)
structure_strength: int = Field(...)
is_portrait: bool = Field(...)
portrait_style: str | None = Field(...)
portrait_beautifier: str | None = Field(...)
flavor: str = Field(...)
engine: str = Field(...)
fixed_generation: bool = Field(...)
class ImageSkinEnhancerCreativeRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
class ImageSkinEnhancerFaithfulRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
skin_detail: int = Field(...)
class ImageSkinEnhancerFlexibleRequest(BaseModel):
image: str = Field(...)
sharpen: int = Field(...)
smart_grain: int = Field(...)
optimized_for: str = Field(...)
class TaskResponse(BaseModel):
"""Unified response model that handles both wrapped and unwrapped API responses."""
task_id: str = Field(...)
status: str = Field(validation_alias=AliasChoices("status", "task_status"))
generated: list[str] | None = Field(None)
@model_validator(mode="before")
@classmethod
def unwrap_data(cls, values: dict) -> dict:
if "data" in values and isinstance(values["data"], dict):
return values["data"]
return values
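
Because of the before-validator and the status alias above, both an enveloped payload and a flat one validate to the same TaskResponse. A short usage sketch (the field values are made up):

```python
from comfy_api_nodes.apis.magnific import TaskResponse  # the model defined above

wrapped = {"data": {"task_id": "abc123", "task_status": "IN_PROGRESS"}}
flat = {"task_id": "abc123", "status": "COMPLETED", "generated": ["https://example.com/out.png"]}

print(TaskResponse.model_validate(wrapped).status)     # IN_PROGRESS (unwrapped from "data", aliased from task_status)
print(TaskResponse.model_validate(flat).generated[0])  # https://example.com/out.png
```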

View File

@@ -9,7 +9,6 @@ from comfy_api_nodes.apis.bytedance import (
RECOMMENDED_PRESETS,
RECOMMENDED_PRESETS_SEEDREAM_4,
VIDEO_TASKS_EXECUTION_TIME,
Image2ImageTaskCreationRequest,
Image2VideoTaskCreationRequest,
ImageTaskCreationResponse,
Seedream4Options,
@@ -174,99 +173,6 @@ class ByteDanceImageNode(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
class ByteDanceImageEditNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="ByteDanceImageEditNode",
display_name="ByteDance Image Edit",
category="api node/image/ByteDance",
description="Edit images using ByteDance models via api based on prompt",
inputs=[
IO.Combo.Input("model", options=["seededit-3-0-i2i-250628"]),
IO.Image.Input(
"image",
tooltip="The base image to edit",
),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Instruction to edit image",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation",
optional=True,
),
IO.Float.Input(
"guidance_scale",
default=5.5,
min=1.0,
max=10.0,
step=0.01,
display_mode=IO.NumberDisplay.number,
tooltip="Higher value makes the image follow the prompt more closely",
optional=True,
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip='Whether to add an "AI generated" watermark to the image',
optional=True,
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
is_deprecated=True,
)
@classmethod
async def execute(
cls,
model: str,
image: Input.Image,
prompt: str,
seed: int,
guidance_scale: float,
watermark: bool,
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=True, min_length=1)
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1))
source_url = (await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0]
payload = Image2ImageTaskCreationRequest(
model=model,
prompt=prompt,
image=source_url,
seed=seed,
guidance_scale=guidance_scale,
watermark=watermark,
)
response = await sync_op(
cls,
ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"),
data=payload,
response_model=ImageTaskCreationResponse,
)
return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
class ByteDanceSeedreamNode(IO.ComfyNode):
@classmethod
@@ -1101,7 +1007,6 @@ class ByteDanceExtension(ComfyExtension):
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [
ByteDanceImageNode,
ByteDanceImageEditNode,
ByteDanceSeedreamNode,
ByteDanceTextToVideoNode,
ByteDanceImageToVideoNode,

View File

@@ -0,0 +1,889 @@
import math
from typing_extensions import override
from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.magnific import (
ImageRelightAdvancedSettingsRequest,
ImageRelightRequest,
ImageSkinEnhancerCreativeRequest,
ImageSkinEnhancerFaithfulRequest,
ImageSkinEnhancerFlexibleRequest,
ImageStyleTransferRequest,
ImageUpscalerCreativeRequest,
ImageUpscalerPrecisionV2Request,
InputAdvancedSettings,
InputPortraitMode,
InputSkinEnhancerMode,
TaskResponse,
)
from comfy_api_nodes.util import (
ApiEndpoint,
download_url_to_image_tensor,
downscale_image_tensor,
get_image_dimensions,
get_number_of_images,
poll_op,
sync_op,
upload_images_to_comfyapi,
validate_image_aspect_ratio,
validate_image_dimensions,
)
class MagnificImageUpscalerCreativeNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageUpscalerCreativeNode",
display_name="Magnific Image Upscale (Creative)",
category="api node/image/Magnific",
description="Prompt-guided enhancement, stylization, and 2x/4x/8x/16x upscaling. "
"Maximum output: 25.3 megapixels.",
inputs=[
IO.Image.Input("image"),
IO.String.Input("prompt", multiline=True, default=""),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
IO.Combo.Input(
"optimized_for",
options=[
"standard",
"soft_portraits",
"hard_portraits",
"art_n_illustration",
"videogame_assets",
"nature_n_landscapes",
"films_n_photography",
"3d_renders",
"science_fiction_n_horror",
],
),
IO.Int.Input("creativity", min=-10, max=10, default=0, display_mode=IO.NumberDisplay.slider),
IO.Int.Input(
"hdr",
min=-10,
max=10,
default=0,
tooltip="The level of definition and detail.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"resemblance",
min=-10,
max=10,
default=0,
tooltip="The level of resemblance to the original image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"fractality",
min=-10,
max=10,
default=0,
tooltip="The strength of the prompt and intricacy per square pixel.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"engine",
options=["automatic", "magnific_illusio", "magnific_sharpy", "magnific_sparkle"],
),
IO.Boolean.Input(
"auto_downscale",
default=False,
tooltip="Automatically downscale input image if output would exceed maximum pixel limit.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
expr="""
(
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
prompt: str,
scale_factor: str,
optimized_for: str,
creativity: int,
hdr: int,
resemblance: int,
fractality: int,
engine: str,
auto_downscale: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
max_output_pixels = 25_300_000
height, width = get_image_dimensions(image)
requested_scale = int(scale_factor.rstrip("x"))
output_pixels = height * width * requested_scale * requested_scale
if output_pixels > max_output_pixels:
if auto_downscale:
# Find optimal scale factor that doesn't require >2x downscale.
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
input_pixels = width * height
scale = 2
max_input_pixels = max_output_pixels // 4
for candidate in [16, 8, 4, 2]:
if candidate > requested_scale:
continue
scale_output_pixels = input_pixels * candidate * candidate
if scale_output_pixels <= max_output_pixels:
scale = candidate
max_input_pixels = None
break
downscale_ratio = math.sqrt(scale_output_pixels / max_output_pixels)
if downscale_ratio <= 2.0:
scale = candidate
max_input_pixels = max_output_pixels // (candidate * candidate)
break
if max_input_pixels is not None:
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
scale_factor = f"{scale}x"
else:
raise ValueError(
f"Output size ({width * requested_scale}x{height * requested_scale} = {output_pixels:,} pixels) "
f"exceeds maximum allowed size of {max_output_pixels:,} pixels. "
f"Use a smaller input image or lower scale factor."
)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler", method="POST"),
response_model=TaskResponse,
data=ImageUpscalerCreativeRequest(
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
scale_factor=scale_factor,
optimized_for=optimized_for,
creativity=creativity,
hdr=hdr,
resemblance=resemblance,
fractality=fractality,
engine=engine,
prompt=prompt if prompt else None,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageUpscalerPreciseV2Node",
display_name="Magnific Image Upscale (Precise V2)",
category="api node/image/Magnific",
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
"Maximum output: 10060×10060 pixels.",
inputs=[
IO.Image.Input("image"),
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
IO.Combo.Input(
"flavor",
options=["sublime", "photo", "photo_denoiser"],
tooltip="Processing style: "
"sublime for general use, photo for photographs, photo_denoiser for noisy photos.",
),
IO.Int.Input(
"sharpen",
min=0,
max=100,
default=7,
tooltip="Image sharpness intensity. Higher values increase edge definition and clarity.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"smart_grain",
min=0,
max=100,
default=7,
tooltip="Intelligent grain/texture enhancement to prevent the image from "
"looking too smooth or artificial.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"ultra_detail",
min=0,
max=100,
default=30,
tooltip="Controls fine detail, textures, and micro-details added during upscaling.",
display_mode=IO.NumberDisplay.slider,
),
IO.Boolean.Input(
"auto_downscale",
default=False,
tooltip="Automatically downscale input image if output would exceed maximum resolution.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
expr="""
(
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
scale_factor: str,
flavor: str,
sharpen: int,
smart_grain: int,
ultra_detail: int,
auto_downscale: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
max_output_dimension = 10060
height, width = get_image_dimensions(image)
requested_scale = int(scale_factor.strip("x"))
output_width = width * requested_scale
output_height = height * requested_scale
if output_width > max_output_dimension or output_height > max_output_dimension:
if auto_downscale:
# Find optimal scale factor that doesn't require >2x downscale.
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
max_dim = max(width, height)
scale = 2
max_input_dim = max_output_dimension // 2
scale_ratio = max_input_dim / max_dim
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
for candidate in [16, 8, 4, 2]:
if candidate > requested_scale:
continue
output_dim = max_dim * candidate
if output_dim <= max_output_dimension:
scale = candidate
max_input_pixels = None
break
downscale_ratio = output_dim / max_output_dimension
if downscale_ratio <= 2.0:
scale = candidate
max_input_dim = max_output_dimension // candidate
scale_ratio = max_input_dim / max_dim
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
break
if max_input_pixels is not None:
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
requested_scale = scale
else:
raise ValueError(
f"Output dimensions ({output_width}x{output_height}) exceed maximum allowed "
f"resolution of {max_output_dimension}x{max_output_dimension} pixels. "
f"Use a smaller input image or lower scale factor."
)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler-precision-v2", method="POST"),
response_model=TaskResponse,
data=ImageUpscalerPrecisionV2Request(
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
scale_factor=requested_scale,
flavor=flavor,
sharpen=sharpen,
smart_grain=smart_grain,
ultra_detail=ultra_detail,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler-precision-v2/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageStyleTransferNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageStyleTransferNode",
display_name="Magnific Image Style Transfer",
category="api node/image/Magnific",
description="Transfer the style from a reference image to your input image.",
inputs=[
IO.Image.Input("image", tooltip="The image to apply style transfer to."),
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
IO.String.Input("prompt", multiline=True, default=""),
IO.Int.Input(
"style_strength",
min=0,
max=100,
default=100,
tooltip="Percentage of style strength.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"structure_strength",
min=0,
max=100,
default=50,
tooltip="Maintains the structure of the original image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"flavor",
options=["faithful", "gen_z", "psychedelia", "detaily", "clear", "donotstyle", "donotstyle_sharp"],
tooltip="Style transfer flavor.",
),
IO.Combo.Input(
"engine",
options=[
"balanced",
"definio",
"illusio",
"3d_cartoon",
"colorful_anime",
"caricature",
"real",
"super_real",
"softy",
],
tooltip="Processing engine selection.",
),
IO.DynamicCombo.Input(
"portrait_mode",
options=[
IO.DynamicCombo.Option("disabled", []),
IO.DynamicCombo.Option(
"enabled",
[
IO.Combo.Input(
"portrait_style",
options=["standard", "pop", "super_pop"],
tooltip="Visual style applied to portrait images.",
),
IO.Combo.Input(
"portrait_beautifier",
options=["none", "beautify_face", "beautify_face_max"],
tooltip="Facial beautification intensity on portraits.",
),
],
),
],
tooltip="Enable portrait mode for facial enhancements.",
),
IO.Boolean.Input(
"fixed_generation",
default=True,
tooltip="When disabled, expect each generation to introduce a degree of randomness, "
"leading to more diverse outcomes.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.11}""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
reference_image: Input.Image,
prompt: str,
style_strength: int,
structure_strength: int,
flavor: str,
engine: str,
portrait_mode: InputPortraitMode,
fixed_generation: bool,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
if get_number_of_images(reference_image) != 1:
raise ValueError("Exactly one reference image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
validate_image_dimensions(reference_image, min_height=160, min_width=160)
is_portrait = portrait_mode["portrait_mode"] == "enabled"
portrait_style = portrait_mode.get("portrait_style", "standard")
portrait_beautifier = portrait_mode.get("portrait_beautifier", "none")
uploaded_urls = await upload_images_to_comfyapi(cls, [image, reference_image], max_images=2)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-style-transfer", method="POST"),
response_model=TaskResponse,
data=ImageStyleTransferRequest(
image=uploaded_urls[0],
reference_image=uploaded_urls[1],
prompt=prompt if prompt else None,
style_strength=style_strength,
structure_strength=structure_strength,
is_portrait=is_portrait,
portrait_style=portrait_style if is_portrait else None,
portrait_beautifier=portrait_beautifier if is_portrait and portrait_beautifier != "none" else None,
flavor=flavor,
engine=engine,
fixed_generation=fixed_generation,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-style-transfer/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageRelightNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageRelightNode",
display_name="Magnific Image Relight",
category="api node/image/Magnific",
description="Relight an image with lighting adjustments and optional reference-based light transfer.",
inputs=[
IO.Image.Input("image", tooltip="The image to relight."),
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Descriptive guidance for lighting. Supports emphasis notation (1-1.4).",
),
IO.Int.Input(
"light_transfer_strength",
min=0,
max=100,
default=100,
tooltip="Intensity of light transfer application.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"style",
options=[
"standard",
"darker_but_realistic",
"clean",
"smooth",
"brighter",
"contrasted_n_hdr",
"just_composition",
],
tooltip="Stylistic output preference.",
),
IO.Boolean.Input(
"interpolate_from_original",
default=False,
tooltip="Restricts generation freedom to match original more closely.",
),
IO.Boolean.Input(
"change_background",
default=True,
tooltip="Modifies background based on prompt/reference.",
),
IO.Boolean.Input(
"preserve_details",
default=True,
tooltip="Maintains texture and fine details from original.",
),
IO.DynamicCombo.Input(
"advanced_settings",
options=[
IO.DynamicCombo.Option("disabled", []),
IO.DynamicCombo.Option(
"enabled",
[
IO.Int.Input(
"whites",
min=0,
max=100,
default=50,
tooltip="Adjusts the brightest tones in the image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"blacks",
min=0,
max=100,
default=50,
tooltip="Adjusts the darkest tones in the image.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"brightness",
min=0,
max=100,
default=50,
tooltip="Overall brightness adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"contrast",
min=0,
max=100,
default=50,
tooltip="Contrast adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"saturation",
min=0,
max=100,
default=50,
tooltip="Color saturation adjustment.",
display_mode=IO.NumberDisplay.slider,
),
IO.Combo.Input(
"engine",
options=[
"automatic",
"balanced",
"cool",
"real",
"illusio",
"fairy",
"colorful_anime",
"hard_transform",
"softy",
],
tooltip="Processing engine selection.",
),
IO.Combo.Input(
"transfer_light_a",
options=["automatic", "low", "medium", "normal", "high", "high_on_faces"],
tooltip="The intensity of light transfer.",
),
IO.Combo.Input(
"transfer_light_b",
options=[
"automatic",
"composition",
"straight",
"smooth_in",
"smooth_out",
"smooth_both",
"reverse_both",
"soft_in",
"soft_out",
"soft_mid",
# "strong_mid", # Commented out because requests fail when this is set.
"style_shift",
"strong_shift",
],
tooltip="Also modifies light transfer intensity. "
"Can be combined with the previous control for varied effects.",
),
IO.Boolean.Input(
"fixed_generation",
default=True,
tooltip="Ensures consistent output with the same settings.",
),
],
),
],
tooltip="Fine-tuning options for advanced lighting control.",
),
IO.Image.Input(
"reference_image",
optional=True,
tooltip="Optional reference image to transfer lighting from.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.11}""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
prompt: str,
light_transfer_strength: int,
style: str,
interpolate_from_original: bool,
change_background: bool,
preserve_details: bool,
advanced_settings: InputAdvancedSettings,
reference_image: Input.Image | None = None,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
if reference_image is not None and get_number_of_images(reference_image) != 1:
raise ValueError("Exactly one reference image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
if reference_image is not None:
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(reference_image, min_height=160, min_width=160)
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0]
reference_url = None
if reference_image is not None:
reference_url = (await upload_images_to_comfyapi(cls, reference_image, max_images=1))[0]
adv_settings = None
if advanced_settings["advanced_settings"] == "enabled":
adv_settings = ImageRelightAdvancedSettingsRequest(
whites=advanced_settings["whites"],
blacks=advanced_settings["blacks"],
brightness=advanced_settings["brightness"],
contrast=advanced_settings["contrast"],
saturation=advanced_settings["saturation"],
engine=advanced_settings["engine"],
transfer_light_a=advanced_settings["transfer_light_a"],
transfer_light_b=advanced_settings["transfer_light_b"],
fixed_generation=advanced_settings["fixed_generation"],
)
initial_res = await sync_op(
cls,
ApiEndpoint(path="/proxy/freepik/v1/ai/image-relight", method="POST"),
response_model=TaskResponse,
data=ImageRelightRequest(
image=image_url,
prompt=prompt if prompt else None,
transfer_light_from_reference_image=reference_url,
light_transfer_strength=light_transfer_strength,
interpolate_from_original=interpolate_from_original,
change_background=change_background,
style=style,
preserve_details=preserve_details,
advanced_settings=adv_settings,
),
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-relight/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificImageSkinEnhancerNode(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="MagnificImageSkinEnhancerNode",
display_name="Magnific Image Skin Enhancer",
category="api node/image/Magnific",
description="Skin enhancement for portraits with multiple processing modes.",
inputs=[
IO.Image.Input("image", tooltip="The portrait image to enhance."),
IO.Int.Input(
"sharpen",
min=0,
max=100,
default=0,
tooltip="Sharpening intensity level.",
display_mode=IO.NumberDisplay.slider,
),
IO.Int.Input(
"smart_grain",
min=0,
max=100,
default=2,
tooltip="Smart grain intensity level.",
display_mode=IO.NumberDisplay.slider,
),
IO.DynamicCombo.Input(
"mode",
options=[
IO.DynamicCombo.Option("creative", []),
IO.DynamicCombo.Option(
"faithful",
[
IO.Int.Input(
"skin_detail",
min=0,
max=100,
default=80,
tooltip="Skin detail enhancement level.",
display_mode=IO.NumberDisplay.slider,
),
],
),
IO.DynamicCombo.Option(
"flexible",
[
IO.Combo.Input(
"optimized_for",
options=[
"enhance_skin",
"improve_lighting",
"enhance_everything",
"transform_to_real",
"no_make_up",
],
tooltip="Enhancement optimization target.",
),
],
),
],
tooltip="Processing mode: creative for artistic enhancement, "
"faithful for preserving original appearance, "
"flexible for targeted optimization.",
),
],
outputs=[
IO.Image.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["mode"]),
expr="""
(
$rates := {"creative": 0.29, "faithful": 0.37, "flexible": 0.45};
{"type":"usd","usd": $lookup($rates, widgets.mode)}
)
""",
),
)
@classmethod
async def execute(
cls,
image: Input.Image,
sharpen: int,
smart_grain: int,
mode: InputSkinEnhancerMode,
) -> IO.NodeOutput:
if get_number_of_images(image) != 1:
raise ValueError("Exactly one input image is required.")
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
validate_image_dimensions(image, min_height=160, min_width=160)
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=4096 * 4096))[0]
selected_mode = mode["mode"]
if selected_mode == "creative":
endpoint = "creative"
data = ImageSkinEnhancerCreativeRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
)
elif selected_mode == "faithful":
endpoint = "faithful"
data = ImageSkinEnhancerFaithfulRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
skin_detail=mode["skin_detail"],
)
else: # flexible
endpoint = "flexible"
data = ImageSkinEnhancerFlexibleRequest(
image=image_url,
sharpen=sharpen,
smart_grain=smart_grain,
optimized_for=mode["optimized_for"],
)
initial_res = await sync_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{endpoint}", method="POST"),
response_model=TaskResponse,
data=data,
)
final_response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{initial_res.task_id}"),
response_model=TaskResponse,
status_extractor=lambda x: x.status,
poll_interval=10.0,
max_poll_attempts=480,
)
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
class MagnificExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
return [
# MagnificImageUpscalerCreativeNode,
# MagnificImageUpscalerPreciseV2Node,
MagnificImageStyleTransferNode,
MagnificImageRelightNode,
MagnificImageSkinEnhancerNode,
]
async def comfy_entrypoint() -> MagnificExtension:
return MagnificExtension()
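
For the two upscaler nodes above (currently commented out of the extension's node list), the auto_downscale branch picks the largest scale factor whose output fits the pixel cap without shrinking the input by more than 2x. A standalone sketch of that selection for the creative upscaler's 25.3 MP limit (input size and requested scale are assumed values):

```python
import math

def pick_scale(width: int, height: int, requested_scale: int, max_output_pixels: int = 25_300_000):
    """Return (scale, max_input_pixels); max_input_pixels is None when no downscale is needed."""
    input_pixels = width * height
    for candidate in (16, 8, 4, 2):
        if candidate > requested_scale:
            continue
        output_pixels = input_pixels * candidate * candidate
        if output_pixels <= max_output_pixels:
            return candidate, None                                   # fits as-is
        if math.sqrt(output_pixels / max_output_pixels) <= 2.0:
            return candidate, max_output_pixels // (candidate * candidate)
    return 2, max_output_pixels // 4                                 # worst case: shrink input, upscale 2x

print(pick_scale(4000, 3000, 16))  # (2, 6325000): a 12 MP input cannot support 4x within a 2x shrink
```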

View File

@@ -56,15 +56,14 @@ def image_tensor_pair_to_batch(image1: torch.Tensor, image2: torch.Tensor) -> to
def tensor_to_bytesio(
image: torch.Tensor,
*,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
mime_type: str = "image/png",
) -> BytesIO:
"""Converts a torch.Tensor image to a named BytesIO object.
Args:
image: Input torch.Tensor image.
name: Optional filename for the BytesIO object.
total_pixels: Maximum total pixels for potential downscaling.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').
Returns:
@@ -79,13 +78,14 @@ def tensor_to_bytesio(
return img_binary
def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image:
def tensor_to_pil(image: torch.Tensor, total_pixels: int | None = 2048 * 2048) -> Image.Image:
"""Converts a single torch.Tensor image [H, W, C] to a PIL Image, optionally downscaling."""
if len(image.shape) > 3:
image = image[0]
# TODO: remove alpha if not allowed and present
input_tensor = image.cpu()
input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
if total_pixels is not None:
input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
image_np = (input_tensor.numpy() * 255).astype(np.uint8)
img = Image.fromarray(image_np)
return img
@@ -93,14 +93,14 @@ def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image
def tensor_to_base64_string(
image_tensor: torch.Tensor,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
mime_type: str = "image/png",
) -> str:
"""Convert [B, H, W, C] or [H, W, C] tensor to a base64 string.
Args:
image_tensor: Input torch.Tensor image.
total_pixels: Maximum total pixels for potential downscaling.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').
Returns:
@@ -161,14 +161,14 @@ def downscale_image_tensor_by_max_side(image: torch.Tensor, *, max_side: int) -
def tensor_to_data_uri(
image_tensor: torch.Tensor,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
mime_type: str = "image/png",
) -> str:
"""Converts a tensor image to a Data URI string.
Args:
image_tensor: Input torch.Tensor image.
total_pixels: Maximum total pixels for potential downscaling.
total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp').
Returns:

View File

@@ -49,7 +49,7 @@ async def upload_images_to_comfyapi(
mime_type: str | None = None,
wait_label: str | None = "Uploading",
show_batch_index: bool = True,
total_pixels: int = 2048 * 2048,
total_pixels: int | None = 2048 * 2048,
) -> list[str]:
"""
Uploads images to ComfyUI API and returns download URLs.

View File

@@ -701,7 +701,14 @@ class Noise_EmptyNoise:
def generate_noise(self, input_latent):
latent_image = input_latent["samples"]
return torch.zeros(latent_image.shape, dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
if latent_image.is_nested:
tensors = latent_image.unbind()
zeros = []
for t in tensors:
zeros.append(torch.zeros(t.shape, dtype=t.dtype, layout=t.layout, device="cpu"))
return comfy.nested_tensor.NestedTensor(zeros)
else:
return torch.zeros(latent_image.shape, dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
class Noise_RandomNoise:
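
The Noise_EmptyNoise fix above builds the zero noise per component when the latent is nested, since the components can have different shapes. A standalone sketch of the same idea, using a plain list in place of comfy.nested_tensor.NestedTensor:

```python
import torch

def empty_noise_like(latents: list[torch.Tensor]) -> list[torch.Tensor]:
    # One zero tensor per component, matching each component's shape/dtype/layout.
    return [torch.zeros(t.shape, dtype=t.dtype, layout=t.layout, device="cpu") for t in latents]

nested = [torch.randn(1, 16, 32, 32), torch.randn(1, 16, 64, 48)]  # differently sized latents
zeros = empty_noise_like(nested)
print([z.shape for z in zeros])  # [torch.Size([1, 16, 32, 32]), torch.Size([1, 16, 64, 48])]
```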

View File

@@ -223,11 +223,24 @@ class LTXVAddGuide(io.ComfyNode):
return frame_idx, latent_idx
@classmethod
def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors):
def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors, latent_downscale_factor=1):
keyframe_idxs, _ = get_keyframe_idxs(cond)
_, latent_coords = cls.PATCHIFIER.patchify(guiding_latent)
pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0) # we need the causal fix only if we're placing the new latents at index 0
pixel_coords[:, 0] += frame_idx
# The following adjusts keyframe end positions for small grid IC-LoRA.
# After dilation, the small grid has the same size and position as the large grid,
# but each token encodes a larger image patch. We adjust the end position (not start)
# so that RoPE represents the correct middle point of each token.
# keyframe_idxs dims: (batch, spatial_dim [t,h,w], token_id, [start, end])
# We only adjust h,w (not t) in dim 1, and only end (not start) in dim 3.
spatial_end_offset = (latent_downscale_factor - 1) * torch.tensor(
scale_factors[1:],
device=pixel_coords.device,
).view(1, -1, 1, 1)
pixel_coords[:, 1:, :, 1:] += spatial_end_offset.to(pixel_coords.dtype)
if keyframe_idxs is None:
keyframe_idxs = pixel_coords
else:
@@ -235,12 +248,12 @@ class LTXVAddGuide(io.ComfyNode):
return node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs})
@classmethod
def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128):
def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128, latent_downscale_factor=1):
if latent_image.shape[1] != in_channels or guiding_latent.shape[1] != in_channels:
raise ValueError("Adding guide to a combined AV latent is not supported.")
positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors)
negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors)
positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)
negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)
if guide_mask is not None:
target_h = max(noise_mask.shape[3], guide_mask.shape[3])
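
A small numeric sketch of the end-offset adjustment above, with assumed values: scale_factors = (8, 32, 32) as the (t, h, w) latent-to-pixel factors and latent_downscale_factor = 2 for the small-grid IC-LoRA case. Only the h/w end coordinates move; t and all start coordinates stay put:

```python
import torch

scale_factors = (8, 32, 32)     # assumed (t, h, w) latent-to-pixel scale factors
latent_downscale_factor = 2     # small-grid IC-LoRA

spatial_end_offset = (latent_downscale_factor - 1) * torch.tensor(scale_factors[1:]).view(1, -1, 1, 1)
print(spatial_end_offset.flatten().tolist())  # [32, 32]: each token's h/w end grows by one extra latent cell

pixel_coords = torch.zeros(1, 3, 4, 2)        # (batch, [t, h, w], token, [start, end]) dummy coordinates
pixel_coords[:, 1:, :, 1:] += spatial_end_offset.to(pixel_coords.dtype)
print(pixel_coords[0, :, 0])                  # t row untouched; h and w rows end at 32
```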

View File

@@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.10.0"
__version__ = "0.11.0"

View File

@@ -47,6 +47,7 @@ folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetwo
folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")], supported_pt_extensions)
folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
folder_names_and_paths["hidream_empty_latents"] = ([os.path.join(models_dir, "hidream_empty_latents")], supported_pt_extensions)
folder_names_and_paths["model_patches"] = ([os.path.join(models_dir, "model_patches")], supported_pt_extensions)

Binary file not shown.

Binary file not shown.

View File

@@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.10.0"
version = "0.11.0"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

View File

@@ -1,5 +1,5 @@
comfyui-frontend-package==1.37.11
comfyui-workflow-templates==0.8.15
comfyui-workflow-templates==0.8.24
comfyui-embedded-docs==0.4.0
torch
torchsde
@@ -22,6 +22,7 @@ alembic
SQLAlchemy
av>=14.2.0
comfy-kitchen>=0.2.7
requests
#non essential dependencies:
kornia>=0.7.1