Compare commits

...

13 Commits

Author SHA1 Message Date
David Lee
7e50fafd53
Merge de97192962 into c011fb520c 2026-05-07 23:59:45 +01:00
Alexander Piskun
c011fb520c
[Partner Nodes] new NanoBanana2 node with DynamicCombo/Autogrow (#13753)
* feat(api-nodes): new NanoBanana2 node with DynamicCombo/Autogrow

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* feat: improved status text on uploading

Signed-off-by: bigcat88 <bigcat88@icloud.com>

* feat: improved status text on uploading (2)

Signed-off-by: bigcat88 <bigcat88@icloud.com>

---------

Signed-off-by: bigcat88 <bigcat88@icloud.com>
2026-05-07 12:19:44 -07:00
Alexander Piskun
c945a433ae
fix(api-nodes): fixed price badge for Kling V3 model in the Motion Control node (#13790)
Signed-off-by: bigcat88 <bigcat88@icloud.com>
2026-05-07 11:55:09 -07:00
David Lee
de97192962
Update RGBMaskToLatentMask to raise more detailed error if non-causal VAE inputted 2026-05-04 15:46:35 -04:00
David Lee
d56a093800
Update description of RGBMaskToLatentMask
Clarifies that node is intended to work with causal Video VAEs.
2026-05-04 15:30:51 -04:00
David Lee
8dd41ef82e
Handle case where start_step is greater than last_step 2026-05-04 15:26:21 -04:00
David Lee
b715186140
Merge branch 'master' into time_to_move 2026-05-04 15:08:18 -04:00
David Lee
ae54d7a987
Fix compositing error, change input arguments 2026-05-04 13:50:39 -04:00
David Lee
f3aebfa2b0
Remove VideoLatentCompositeMasked node 2026-05-02 19:51:45 -04:00
David Lee
b3a066559b
Fix new composite function not being defined 2026-05-02 18:37:50 -04:00
David Lee
0b7d56070d
Fix tensor on wrong device error; slight change to logic 2026-05-02 18:28:54 -04:00
David Lee
c3cd2a4e75
Add TimeToMoveKSamplerAdvanced 2026-05-02 15:28:35 -04:00
David Lee
800bf842a5
Add VideoLatentCompositeMasked and RGBMaskToLatentMask nodes 2026-05-02 15:23:42 -04:00
4 changed files with 468 additions and 25 deletions

View File

@@ -83,13 +83,16 @@ class GeminiImageModel(str, Enum):
async def create_image_parts(
cls: type[IO.ComfyNode],
images: Input.Image,
images: Input.Image | list[Input.Image],
image_limit: int = 0,
) -> list[GeminiPart]:
image_parts: list[GeminiPart] = []
if image_limit < 0:
raise ValueError("image_limit must be greater than or equal to 0 when creating Gemini image parts.")
total_images = get_number_of_images(images)
# Accept either a single (possibly-batched) tensor or a list of them; share URL budget across all.
images_list: list[Input.Image] = images if isinstance(images, list) else [images]
total_images = sum(get_number_of_images(img) for img in images_list)
if total_images <= 0:
raise ValueError("No images provided to create_image_parts; at least one image is required.")
@@ -98,10 +101,18 @@ async def create_image_parts(
# Number of images we'll send as URLs (fileData)
num_url_images = min(effective_max, 10) # Vertex API max number of image links
upload_kwargs: dict = {"wait_label": "Uploading reference images"}
if effective_max > num_url_images:
# Split path (e.g. 11+ images): suppress per-image counter to avoid a confusing dual-fraction label.
upload_kwargs = {
"wait_label": f"Uploading reference images ({num_url_images}+)",
"show_batch_index": False,
}
reference_images_urls = await upload_images_to_comfyapi(
cls,
images,
images_list,
max_images=num_url_images,
**upload_kwargs,
)
for reference_image_url in reference_images_urls:
image_parts.append(
@@ -112,15 +123,22 @@ async def create_image_parts(
)
)
)
for idx in range(num_url_images, effective_max):
image_parts.append(
GeminiPart(
inlineData=GeminiInlineData(
mimeType=GeminiMimeType.image_png,
data=tensor_to_base64_string(images[idx]),
if effective_max > num_url_images:
flat: list[torch.Tensor] = []
for tensor in images_list:
if len(tensor.shape) == 4:
flat.extend(tensor[i] for i in range(tensor.shape[0]))
else:
flat.append(tensor)
for idx in range(num_url_images, effective_max):
image_parts.append(
GeminiPart(
inlineData=GeminiInlineData(
mimeType=GeminiMimeType.image_png,
data=tensor_to_base64_string(flat[idx]),
)
)
)
)
return image_parts
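For readers skimming the hunk above: the new code accepts either one batched tensor or a list of them, uploads the first num_url_images as URLs, and inlines the remainder as base64. A minimal sketch of the flattening step, assuming the usual ComfyUI [B, H, W, C] image layout (the helper name here is illustrative, not from the diff):

import torch

def flatten_image_batches(images_list: list[torch.Tensor]) -> list[torch.Tensor]:
    # Expand each batched tensor into single images so indices past the
    # URL budget can be addressed individually for inline base64 parts.
    flat: list[torch.Tensor] = []
    for tensor in images_list:
        if tensor.ndim == 4:  # batched: [B, H, W, C]
            flat.extend(tensor[i] for i in range(tensor.shape[0]))
        else:  # already a single image
            flat.append(tensor)
    return flat

# Two batches of 3 and 2 images flatten to 5 individual tensors.
assert len(flatten_image_batches([torch.rand(3, 64, 64, 3), torch.rand(2, 64, 64, 3)])) == 5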
@@ -891,10 +909,6 @@ class GeminiNanoBanana2(IO.ComfyNode):
"9:16",
"16:9",
"21:9",
# "1:4",
# "4:1",
# "8:1",
# "1:8",
],
default="auto",
tooltip="If set to 'auto', matches your input image's aspect ratio; "
@@ -902,12 +916,7 @@ class GeminiNanoBanana2(IO.ComfyNode):
),
IO.Combo.Input(
"resolution",
options=[
# "512px",
"1K",
"2K",
"4K",
],
options=["1K", "2K", "4K"],
tooltip="Target output resolution. For 2K/4K the native Gemini upscaler is used.",
),
IO.Combo.Input(
@@ -956,6 +965,7 @@ class GeminiNanoBanana2(IO.ComfyNode):
],
is_api_node=True,
price_badge=GEMINI_IMAGE_2_PRICE_BADGE,
is_deprecated=True,
)
@classmethod
@@ -1016,6 +1026,197 @@ class GeminiNanoBanana2(IO.ComfyNode):
)
def _nano_banana_2_v2_model_inputs():
return [
IO.Combo.Input(
"aspect_ratio",
options=[
"auto",
"1:1",
"2:3",
"3:2",
"3:4",
"4:3",
"4:5",
"5:4",
"9:16",
"16:9",
"21:9",
"1:4",
"4:1",
"8:1",
"1:8",
],
default="auto",
tooltip="If set to 'auto', matches your input image's aspect ratio; "
"if no image is provided, a 16:9 square is usually generated.",
),
IO.Combo.Input(
"resolution",
options=["1K", "2K", "4K"],
tooltip="Target output resolution. For 2K/4K the native Gemini upscaler is used.",
),
IO.Combo.Input(
"thinking_level",
options=["MINIMAL", "HIGH"],
),
IO.Autogrow.Input(
"images",
template=IO.Autogrow.TemplateNames(
IO.Image.Input("image"),
names=[f"image_{i}" for i in range(1, 15)],
min=0,
),
tooltip="Optional reference image(s). Up to 14 images total.",
),
IO.Custom("GEMINI_INPUT_FILES").Input(
"files",
optional=True,
tooltip="Optional file(s) to use as context for the model. "
"Accepts inputs from the Gemini Generate Content Input Files node.",
),
]
class GeminiNanoBanana2V2(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="GeminiNanoBanana2V2",
display_name="Nano Banana 2",
category="api node/image/Gemini",
description="Generate or edit images synchronously via Google Vertex API.",
inputs=[
IO.String.Input(
"prompt",
multiline=True,
tooltip="Text prompt describing the image to generate or the edits to apply. "
"Include any constraints, styles, or details the model should follow.",
default="",
),
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"Nano Banana 2 (Gemini 3.1 Flash Image)",
_nano_banana_2_v2_model_inputs(),
),
],
),
IO.Int.Input(
"seed",
default=42,
min=0,
max=0xFFFFFFFFFFFFFFFF,
control_after_generate=True,
tooltip="When the seed is fixed to a specific value, the model makes a best effort to provide "
"the same response for repeated requests. Deterministic output isn't guaranteed. "
"Also, changing the model or parameter settings, such as the temperature, "
"can cause variations in the response even when you use the same seed value. "
"By default, a random seed value is used.",
),
IO.Combo.Input(
"response_modalities",
options=["IMAGE", "IMAGE+TEXT"],
advanced=True,
),
IO.String.Input(
"system_prompt",
multiline=True,
default=GEMINI_IMAGE_SYS_PROMPT,
optional=True,
tooltip="Foundational instructions that dictate an AI's behavior.",
advanced=True,
),
],
outputs=[
IO.Image.Output(),
IO.String.Output(),
IO.Image.Output(
display_name="thought_image",
tooltip="First image from the model's thinking process. "
"Only available with thinking_level HIGH and IMAGE+TEXT modality.",
),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution"]),
expr="""
(
$r := $lookup(widgets, "model.resolution");
$prices := {"1k": 0.0696, "2k": 0.1014, "4k": 0.154};
{"type":"usd","usd": $lookup($prices, $r), "format":{"suffix":"/Image","approximate":true}}
)
""",
),
)
@classmethod
async def execute(
cls,
prompt: str,
model: dict,
seed: int,
response_modalities: str,
system_prompt: str = "",
) -> IO.NodeOutput:
validate_string(prompt, strip_whitespace=True, min_length=1)
model_choice = model["model"]
if model_choice == "Nano Banana 2 (Gemini 3.1 Flash Image)":
model_id = "gemini-3.1-flash-image-preview"
else:
model_id = model_choice
images = model.get("images") or {}
parts: list[GeminiPart] = [GeminiPart(text=prompt)]
if images:
image_tensors: list[Input.Image] = [t for t in images.values() if t is not None]
if image_tensors:
if sum(get_number_of_images(t) for t in image_tensors) > 14:
raise ValueError("The current maximum number of supported images is 14.")
parts.extend(await create_image_parts(cls, image_tensors))
files = model.get("files")
if files is not None:
parts.extend(files)
image_config = GeminiImageConfig(imageSize=model["resolution"])
if model["aspect_ratio"] != "auto":
image_config.aspectRatio = model["aspect_ratio"]
gemini_system_prompt = None
if system_prompt:
gemini_system_prompt = GeminiSystemInstructionContent(parts=[GeminiTextPart(text=system_prompt)], role=None)
response = await sync_op(
cls,
ApiEndpoint(path=f"/proxy/vertexai/gemini/{model_id}", method="POST"),
data=GeminiImageGenerateContentRequest(
contents=[
GeminiContent(role=GeminiRole.user, parts=parts),
],
generationConfig=GeminiImageGenerationConfig(
responseModalities=(["IMAGE"] if response_modalities == "IMAGE" else ["TEXT", "IMAGE"]),
imageConfig=image_config,
thinkingConfig=GeminiThinkingConfig(thinkingLevel=model["thinking_level"]),
),
systemInstruction=gemini_system_prompt,
),
response_model=GeminiGenerateContentResponse,
price_extractor=calculate_tokens_price,
)
return IO.NodeOutput(
await get_image_from_response(response),
get_text_from_response(response),
await get_image_from_response(response, thought=True),
)
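For orientation, execute() receives the DynamicCombo selection as a single dict. The exact payload shape below is an assumption inferred from the reads in the method body (model["resolution"], model.get("images"), and so on); the Autogrow group plausibly arrives as a name-to-tensor mapping:

import torch

# Hypothetical `model` argument at execute() time (keys inferred from the
# lookups above; tensors are placeholder reference images).
model = {
    "model": "Nano Banana 2 (Gemini 3.1 Flash Image)",
    "aspect_ratio": "auto",
    "resolution": "1K",
    "thinking_level": "MINIMAL",
    "images": {"image_1": torch.rand(1, 512, 512, 3), "image_2": None},
    "files": None,
}
# A None entry would be skipped by the `if t is not None` filter in execute().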
class GeminiExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@@ -1024,6 +1225,7 @@ class GeminiExtension(ComfyExtension):
GeminiImage,
GeminiImage2,
GeminiNanoBanana2,
GeminiNanoBanana2V2,
GeminiInputFiles,
]

View File

@@ -2787,11 +2787,15 @@ class MotionControl(IO.ComfyNode):
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["mode"]),
depends_on=IO.PriceBadgeDepends(widgets=["mode", "model"]),
expr="""
(
$prices := {"std": 0.07, "pro": 0.112};
{"type":"usd","usd": $lookup($prices, widgets.mode), "format":{"suffix":"/second"}}
$prices := {
"kling-v3": {"std": 0.126, "pro": 0.168},
"kling-v2-6": {"std": 0.07, "pro": 0.112}
};
$modelPrices := $lookup($prices, widgets.model);
{"type":"usd","usd": $lookup($modelPrices, widgets.mode), "format":{"suffix":"/second"}}
)
""",
),
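Restated in Python for clarity, the corrected badge resolves the model first and the mode second (values copied from the diff; Kling V3 pro therefore shows $0.168/second instead of inheriting the old V2.6 rates):

PRICES = {
    "kling-v3": {"std": 0.126, "pro": 0.168},
    "kling-v2-6": {"std": 0.07, "pro": 0.112},
}

def per_second_price(model: str, mode: str) -> float:
    # Mirrors the two nested $lookup calls in the JSONata expression above.
    return PRICES[model][mode]

assert per_second_price("kling-v3", "pro") == 0.168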

View File

@@ -11,6 +11,142 @@ from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
import re
def video_latent_composite(destination, source, x, y, mask=None, multiplier=8, resize_source=False):
# destination/source shape: [B, C, F, H, W]
source = source.to(destination.device)
if resize_source:
target_size = (source.shape[2], destination.shape[3], destination.shape[4])
source = torch.nn.functional.interpolate(
source,
size=target_size,
mode="trilinear",
align_corners=False
)
x_latent = x // multiplier
y_latent = y // multiplier
if mask is None:
mask = torch.ones_like(source)
else:
mask = mask.to(destination.device, copy=True)
mask = mask.unsqueeze(0).unsqueeze(0)
mask_target_size = (mask.shape[2], source.shape[3], source.shape[4])
mask = torch.nn.functional.interpolate(
mask,
size=mask_target_size,
mode="trilinear",
align_corners=False
)
dst_h, dst_w = destination.shape[3], destination.shape[4]
src_h, src_w = source.shape[3], source.shape[4]
visible_h = max(0, min(y_latent + src_h, dst_h) - max(0, y_latent))
visible_w = max(0, min(x_latent + src_w, dst_w) - max(0, x_latent))
if visible_h <= 0 or visible_w <= 0:
return destination
src_top = max(0, -y_latent)
src_left = max(0, -x_latent)
dst_top = max(0, y_latent)
dst_left = max(0, x_latent)
m = mask[:, :, :, src_top:src_top+visible_h, src_left:src_left+visible_w]
s = source[:, :, :, src_top:src_top+visible_h, src_left:src_left+visible_w]
d = destination[:, :, :, dst_top:dst_top+visible_h, dst_left:dst_left+visible_w]
destination[:, :, :, dst_top:dst_top+visible_h, dst_left:dst_left+visible_w] = (m * s) + ((1.0 - m) * d)
return destination
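A usage sketch for the function above, assuming the [B, C, F, H, W] layout its comment states, pixel-space x/y offsets, and a pixel-space [F, H, W] mask that is resized internally (all sizes illustrative). Note the destination is modified in place and also returned:

import torch

dst = torch.zeros(1, 4, 5, 32, 32)   # 5 latent frames, 32x32 latent
src = torch.ones(1, 4, 5, 16, 16)    # smaller clip to paste in
mask = torch.ones(5, 128, 128)       # [F, H, W] in pixel space
out = video_latent_composite(dst, src, x=64, y=64, mask=mask, multiplier=8)
# x=64 px // 8 = latent column 8, so the source lands at dst[..., 8:24, 8:24]
assert out.shape == (1, 4, 5, 32, 32)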
def time_to_move_sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, latent_mask, denoise=1.0, start_step=None, time_to_move_last_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None):
sampler = comfy.samplers.KSampler(model, steps=steps, device=model.load_device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
model_sampling = model.get_model_object("model_sampling")
process_latent_out = model.get_model_object("process_latent_out")
process_latent_in = model.get_model_object("process_latent_in")
reference_latent_image = latent_image.clone()
reference_sigmas = sampler.sigmas
reference_noise = noise.clone()
if last_step is None or last_step > steps:
last_step = steps
if time_to_move_last_step is None or time_to_move_last_step > last_step:
time_to_move_last_step = last_step
if start_step is None:
start_step = 0
total_iterations = min(last_step, steps) - start_step
if total_iterations <= 0:
return latent_image.to(
device=comfy.model_management.intermediate_device(),
dtype=comfy.model_management.intermediate_dtype(),
)
for i in range(total_iterations):
if i > 0:
# don't add new noise to samples after the first step is taken
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
temp_start = start_step + i
if temp_start < last_step - 1:
temp_force_full_denoise = False
else:
temp_force_full_denoise = force_full_denoise
samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=temp_start, last_step=temp_start + 1, force_full_denoise=temp_force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
if temp_start < time_to_move_last_step:
scale = reference_sigmas[temp_start + 1].to(noise.device)
if torch.count_nonzero(reference_latent_image) > 0: #Don't shift the empty latent image.
noisy = model_sampling.noise_scaling(scale, reference_noise, process_latent_in(reference_latent_image))
noisy = model_sampling.inverse_noise_scaling(scale, noisy)
noisy = process_latent_out(noisy)
else:
noisy = reference_latent_image
noisy = noisy.to(samples.device)  # .to() is not in-place; bind the result
samples = video_latent_composite(samples, noisy, 0, 0, latent_mask, multiplier=1, resize_source=True)
latent_image = samples
samples = samples.to(device=comfy.model_management.intermediate_device(), dtype=comfy.model_management.intermediate_dtype())
return samples
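As the loop reads, each iteration denoises exactly one sigma interval; while temp_start is still below time_to_move_last_step, the reference latent is re-noised to the next sigma (skipping the noise shift when the reference is all zeros) and composited back over the masked region, so masked areas keep tracking the reference motion while unmasked areas generate freely. Past that cutoff, the whole latent denoises normally.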
def time_to_move_common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, latent_mask, denoise=1.0, disable_noise=False, start_step=None, time_to_move_last_step=None, last_step=None, force_full_denoise=False):
latent_image = latent["samples"]
latent_image = comfy.sample.fix_empty_latent_channels(model, latent_image, latent.get("downscale_ratio_spacial", None))
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
batch_inds = latent["batch_index"] if "batch_index" in latent else None
noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
noise_mask = None
if "noise_mask" in latent:
noise_mask = latent["noise_mask"]
callback = latent_preview.prepare_callback(model, steps)
disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
samples = time_to_move_sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, latent_mask,
denoise=denoise, start_step=start_step, time_to_move_last_step=time_to_move_last_step, last_step=last_step,
force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
out = latent.copy()
out.pop("downscale_ratio_spacial", None)
out["samples"] = samples
return (out, )
class BasicScheduler(io.ComfyNode):
@classmethod
@@ -978,6 +1114,46 @@ class SamplerCustomAdvanced(io.ComfyNode):
return io.NodeOutput(out, out_denoised)
sample = execute
class TimeToMoveKSamplerAdvanced(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="TimeToMoveKSamplerAdvanced",
category="sampling/time_to_move",
inputs=[
io.Model.Input("model"),
io.Combo.Input("add_noise", options=["enable", "disable"], advanced=True),
io.Int.Input("noise_seed", default=0, min=0, max=0xffffffffffffffff, control_after_generate=True),
io.Int.Input("steps", default=20, min=1, max=10000),
io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01),
io.Combo.Input("sampler_name", options = comfy.samplers.KSampler.SAMPLERS),
io.Combo.Input("scheduler", options = comfy.samplers.KSampler.SCHEDULERS),
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
io.Latent.Input("latent_image"),
io.Mask.Input("latent_mask", tooltip = "Make sure mask is the same length as the latents rather than the original video."),
io.Int.Input("start_at_step", default = 0, min = 0, max = 10000, advanced = True, tooltip = "Generally should set at a step greater than 0."),
io.Int.Input("time_to_move_end_at_step", default = 0, min = 0, max = 10000, advanced = True, tooltip = "Generally should set at a step greater than 0 and less than total number of steps."),
io.Int.Input("end_at_step", default = 10000, min = 0, max = 10000, advanced = True, tooltip = "Use just like typical end_at_step with normal KSamplerAdvanced"),
io.Combo.Input("return_with_leftover_noise", options=["disable", "enable"], advanced = True),
],
outputs=[
io.Latent.Output(display_name="latent"),
]
)
@classmethod
def execute(cls, model, add_noise, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, latent_mask, start_at_step, time_to_move_end_at_step, end_at_step, return_with_leftover_noise, denoise=1.0) -> io.NodeOutput:
force_full_denoise = True
if return_with_leftover_noise == "enable":
force_full_denoise = False
disable_noise = False
if add_noise == "disable":
disable_noise = True
return time_to_move_common_ksampler(model, noise_seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, latent_mask, denoise=denoise, disable_noise=disable_noise, start_step=start_at_step, time_to_move_last_step=time_to_move_end_at_step, last_step=end_at_step, force_full_denoise=force_full_denoise)
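Going by the tooltips and the clamping in time_to_move_sample, a sensible configuration keeps start_at_step < time_to_move_end_at_step <= end_at_step <= steps. For example, with steps=20, start_at_step=4, time_to_move_end_at_step=12, end_at_step=20, reference compositing is active for steps 4-11 and steps 12-19 then denoise unconstrained.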
class AddNoise(io.ComfyNode):
@classmethod
@@ -1087,6 +1263,7 @@ class CustomSamplersExtension(ComfyExtension):
DisableNoise,
AddNoise,
SamplerCustomAdvanced,
TimeToMoveKSamplerAdvanced,
ManualSigmas,
]

View File

@@ -46,6 +46,42 @@ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_sou
destination[..., top:bottom, left:right] = source_portion + destination_portion
return destination
def convert_rgb_mask_to_latent_mask(
mask: torch.Tensor,
k: int,
spatial_downsample_h: int,
spatial_downsample_w: int
) -> torch.Tensor:
"""
Converts [T, H, W] mask to [T_latent, H_latent, W_latent].
Handles non-square spatial downsampling.
"""
# 1. Temporal Sampling
# Select first frame and every k-th frame thereafter
mask0 = mask[0:1]
mask1 = mask[1::k]
sampled = torch.cat([mask0, mask1], dim=0) # [T_latent, H, W]
# 2. Prepare for Spatial Interpolation
# Shape: [Batch=1, Channels=1, Depth=T_latent, Height=H, Width=W]
sampled = sampled.unsqueeze(0).unsqueeze(0)
# 3. Calculate New Spatial Dimensions
h_latent = sampled.shape[-2] // spatial_downsample_h
w_latent = sampled.shape[-1] // spatial_downsample_w
# 4. Interpolate
# We maintain the temporal count (sampled.shape[2])
# but resize H and W independently
pooled = torch.nn.functional.interpolate(
sampled,
size=(sampled.shape[2], h_latent, w_latent),
mode="nearest"
)
# 5. Return to [T_latent, H_latent, W_latent]
return pooled.squeeze(0).squeeze(0)
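A quick check of the function above for Wan-style geometry (k=4 temporal, 8x8 spatial; sizes illustrative): frames 0, 1, 5, 9, ..., 77 are kept, giving 21 latent frames.

import torch

mask = torch.rand(81, 512, 512)  # [T, H, W] pixel-space mask
latent_mask = convert_rgb_mask_to_latent_mask(
    mask, k=4, spatial_downsample_h=8, spatial_downsample_w=8
)
assert latent_mask.shape == (21, 64, 64)  # [T_latent, H/8, W/8]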
class LatentCompositeMasked(IO.ComfyNode):
@classmethod
def define_schema(cls):
@@ -73,8 +109,7 @@ class LatentCompositeMasked(IO.ComfyNode):
return IO.NodeOutput(output)
composite = execute # TODO: remove
class ImageCompositeMasked(IO.ComfyNode):
@classmethod
def define_schema(cls):
@@ -403,6 +438,30 @@ class ThresholdMask(IO.ComfyNode):
image_to_mask = execute # TODO: remove
class RGBMaskToLatentMask(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="RGBMasktoLatentMask",
search_aliases=["rgb mask to latent mask", "rgb mask", "latent mask"],
description="Converts an RGB mask to a latent-space mask for use with causal Video VAEs (e.g., Wan).",
category="latent",
inputs=[
IO.Mask.Input("mask", optional=False),
IO.Vae.Input("vae", optional=False),
],
outputs=[IO.Mask.Output()],
)
@classmethod
def execute(cls, mask, vae) -> IO.NodeOutput:
# Ensure we work on a copy of the mask to remain non-destructive
mask_copy = mask.clone()
downscale_ratio = vae.downscale_ratio
if not isinstance(downscale_ratio, tuple) or len(downscale_ratio) < 3:
raise ValueError("RGBMaskToLatentMask requires a causal Video VAE (e.g., Wan). The provided VAE does not have a compatible downscale_ratio.")
temporal_latent_frames = downscale_ratio[0](mask.shape[0])
k = (mask.shape[0] - 1) // (temporal_latent_frames - 1) if (temporal_latent_frames - 1) > 1 else 1
return IO.NodeOutput(convert_rgb_mask_to_latent_mask(mask_copy, k, spatial_downsample_h=downscale_ratio[1], spatial_downsample_w=downscale_ratio[2]))
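Worked through for a Wan-style VAE, assuming its temporal entry downscale_ratio[0] is a callable like lambda t: (t + 3) // 4 (the diff only shows it being called, so the exact mapping is an assumption): an 81-frame mask yields 21 latent frames, so k = (81 - 1) // (21 - 1) = 4, matching the first-frame-plus-every-4th sampling in convert_rgb_mask_to_latent_mask above.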
# Mask Preview - original implement from
# https://github.com/cubiq/ComfyUI_essentials/blob/9d9f4bedfc9f0321c19faf71855e228c93bd0dc9/mask.py#L81
@@ -444,6 +503,7 @@ class MaskExtension(ComfyExtension):
FeatherMask,
GrowMask,
ThresholdMask,
RGBMaskToLatentMask,
MaskPreview,
]