Compare commits

...

7 Commits

Author SHA1 Message Date
Jukka Seppänen
a22433fe41
Merge c03a90ecfc into 4064062e7d 2026-01-31 11:27:19 +09:00
comfyanonymous
4064062e7d
Update python patch version in dep workflow. (#12184) 2026-01-30 20:20:06 -05:00
pythongosssss
8aabe2403e
Add color type and Color to RGB Int node (#12145)
* add color type and color to rgb int node

* review fix for allowing output

---------

Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>
2026-01-30 15:01:33 -08:00
Alexander Piskun
0167653781
feat(api-nodes): add RecraftCreateStyleNode node (#12055)
Co-authored-by: Jedrzej Kosinski <kosinkadink1@gmail.com>
2026-01-30 14:04:43 -08:00
Jedrzej Kosinski
0a7993729c
Remove NodeInfoV3-related code; we are almost 100% guaranteed to stick with NodeInfoV1 for the foreseeable future (#12147)
Co-authored-by: guill <jacob.e.segal@gmail.com>
2026-01-30 10:21:48 -08:00
kijai
c03a90ecfc Fix encode 2026-01-24 11:52:00 +02:00
kijai
570b11198b Flux2: Support Tiny VAE (taef2) 2026-01-23 15:43:29 +02:00
9 changed files with 212 additions and 101 deletions

=== File 1 of 9 ===

@@ -29,7 +29,7 @@ on:
         description: 'python patch version'
         required: true
         type: string
-        default: "9"
+        default: "11"
 #  push:
 #    branches:
 #      - master

=== File 2 of 9: comfy/latent_formats.py ===

@@ -224,6 +224,7 @@ class Flux2(LatentFormat):
         self.latent_rgb_factors_bias = [-0.0329, -0.0718, -0.0851]
         self.latent_rgb_factors_reshape = lambda t: t.reshape(t.shape[0], 32, 2, 2, t.shape[-2], t.shape[-1]).permute(0, 1, 4, 2, 5, 3).reshape(t.shape[0], 32, t.shape[-2] * 2, t.shape[-1] * 2)
+        self.taesd_decoder_name = "taef2_decoder"

     def process_in(self, latent):
         return latent
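
For reference, this reshape is a 2x2 pixel-shuffle: Flux2 stores latents packed as [B, 32*4, h, w], and the preview path unpacks them to [B, 32, 2h, 2w] before applying the RGB factors. A quick standalone shape check (illustrative, not part of the diff):

import torch

t = torch.randn(1, 128, 16, 16)  # packed Flux2 latent
u = t.reshape(1, 32, 2, 2, 16, 16).permute(0, 1, 4, 2, 5, 3).reshape(1, 32, 32, 32)
print(u.shape)  # torch.Size([1, 32, 32, 32])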

=== File 3 of 9: comfy/sd.py ===

@@ -451,7 +451,7 @@ class VAE:
                                                  decoder_config={'target': "comfy.ldm.modules.temporal_ae.VideoDecoder", 'params': decoder_config})
         elif "taesd_decoder.1.weight" in sd:
             self.latent_channels = sd["taesd_decoder.1.weight"].shape[1]
-            self.first_stage_model = comfy.taesd.taesd.TAESD(latent_channels=self.latent_channels)
+            self.first_stage_model = comfy.taesd.taesd.TAESD(latent_channels=self.latent_channels, use_midblock_gn = True if "taesd_decoder.3.pool.0.weight" in sd else False)
         elif "vquantizer.codebook.weight" in sd: #VQGan: stage a of stable cascade
             self.first_stage_model = StageA()
             self.downscale_ratio = 4

=== File 4 of 9: comfy/taesd/taesd.py ===

@@ -17,28 +17,36 @@ class Clamp(nn.Module):
         return torch.tanh(x / 3) * 3

 class Block(nn.Module):
-    def __init__(self, n_in, n_out):
+    def __init__(self, n_in, n_out, use_midblock_gn=False):
         super().__init__()
         self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out))
         self.skip = comfy.ops.disable_weight_init.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
         self.fuse = nn.ReLU()
+        self.pool = None
+        if use_midblock_gn:
+            conv1x1, n_gn = lambda n_in, n_out: comfy.ops.disable_weight_init.Conv2d(n_in, n_out, 1, bias=False), n_in*4
+            self.pool = nn.Sequential(conv1x1(n_in, n_gn), comfy.ops.disable_weight_init.GroupNorm(4, n_gn), nn.ReLU(inplace=True), conv1x1(n_gn, n_in))

     def forward(self, x):
+        if self.pool is not None:
+            x = x + self.pool(x)
         return self.fuse(self.conv(x) + self.skip(x))

-def Encoder(latent_channels=4):
+def Encoder(latent_channels=4, use_midblock_gn=False):
+    mb_kw = dict(use_midblock_gn=use_midblock_gn)
     return nn.Sequential(
         conv(3, 64), Block(64, 64),
         conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
         conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
-        conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+        conv(64, 64, stride=2, bias=False), Block(64, 64, **mb_kw), Block(64, 64, **mb_kw), Block(64, 64, **mb_kw),
         conv(64, latent_channels),
     )

-def Decoder(latent_channels=4):
+def Decoder(latent_channels=4, use_midblock_gn=False):
+    mb_kw = dict(use_midblock_gn=use_midblock_gn)
     return nn.Sequential(
         Clamp(), conv(latent_channels, 64), nn.ReLU(),
-        Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
+        Block(64, 64, **mb_kw), Block(64, 64, **mb_kw), Block(64, 64, **mb_kw), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
         Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
         Block(64, 64), Block(64, 64), Block(64, 64), nn.Upsample(scale_factor=2), conv(64, 64, bias=False),
         Block(64, 64), conv(64, 3),
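
The new pool branch is a residual expand-and-project: a 1x1 conv widens to n_gn = 4*n_in channels, GroupNorm(4) and ReLU are applied, and a second 1x1 conv projects back before a residual add. A torch-only sketch of the same shape behavior (plain nn.Conv2d standing in for the comfy.ops no-weight-init variants):

import torch
from torch import nn

n_in = 64
pool = nn.Sequential(
    nn.Conv2d(n_in, n_in * 4, 1, bias=False),   # conv1x1 up to n_gn = n_in*4 channels
    nn.GroupNorm(4, n_in * 4),
    nn.ReLU(inplace=True),
    nn.Conv2d(n_in * 4, n_in, 1, bias=False),   # conv1x1 back down
)
x = torch.randn(1, n_in, 32, 32)
out = x + pool(x)  # residual add; channel and spatial shapes preserved
print(out.shape)   # torch.Size([1, 64, 32, 32])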
@@ -48,17 +56,30 @@ class TAESD(nn.Module):
     latent_magnitude = 3
     latent_shift = 0.5

-    def __init__(self, encoder_path=None, decoder_path=None, latent_channels=4):
+    def __init__(self, encoder_path=None, decoder_path=None, latent_channels=4, use_midblock_gn=False):
         """Initialize pretrained TAESD on the given device from the given checkpoints."""
         super().__init__()
-        self.taesd_encoder = Encoder(latent_channels=latent_channels)
-        self.taesd_decoder = Decoder(latent_channels=latent_channels)
+        self.latent_channels = latent_channels
+        self.use_midblock_gn = use_midblock_gn
+        self.taesd_encoder = Encoder(latent_channels=latent_channels, use_midblock_gn=use_midblock_gn)
+        self.taesd_decoder = Decoder(latent_channels=latent_channels, use_midblock_gn=use_midblock_gn)
+        if encoder_path is not None:
+            self.taesd_encoder, self.latent_channels = self._load_model(encoder_path, Encoder)
+        if decoder_path is not None:
+            self.taesd_decoder, self.latent_channels = self._load_model(decoder_path, Decoder)
         self.vae_scale = torch.nn.Parameter(torch.tensor(1.0))
         self.vae_shift = torch.nn.Parameter(torch.tensor(0.0))
-        if encoder_path is not None:
-            self.taesd_encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True))
-        if decoder_path is not None:
-            self.taesd_decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True))
+
+    def _load_model(self, path, model_class):
+        """Load a TAESD encoder or decoder from a file."""
+        sd = comfy.utils.load_torch_file(path, safe_load=True)
+        latent_channels = sd["1.weight"].shape[1]
+        model = model_class(latent_channels=latent_channels, use_midblock_gn="3.pool.0.weight" in sd)
+        model.load_state_dict(sd)
+        return model, latent_channels

     @staticmethod
     def scale_latents(x):
@@ -71,9 +92,15 @@ class TAESD(nn.Module):
         return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude)

     def decode(self, x):
+        if x.shape[1] == self.latent_channels * 4:
+            x = x.reshape(x.shape[0], self.latent_channels, 2, 2, x.shape[-2], x.shape[-1]).permute(0, 1, 4, 2, 5, 3).reshape(x.shape[0], self.latent_channels, x.shape[-2] * 2, x.shape[-1] * 2)
         x_sample = self.taesd_decoder((x - self.vae_shift) * self.vae_scale)
         x_sample = x_sample.sub(0.5).mul(2)
         return x_sample

     def encode(self, x):
-        return (self.taesd_encoder(x * 0.5 + 0.5) / self.vae_scale) + self.vae_shift
+        x_sample = (self.taesd_encoder(x * 0.5 + 0.5) / self.vae_scale) + self.vae_shift
+        if self.latent_channels == 32 and self.use_midblock_gn: # Only taef2 for Flux2 currently, pack latents: [B, C, H, W] -> [B, C*4, H//2, W//2]
+            x_sample = x_sample.reshape(x_sample.shape[0], self.latent_channels, x_sample.shape[-2] // 2, 2, x_sample.shape[-1] // 2, 2).permute(0, 1, 3, 5, 2, 4).reshape(x_sample.shape[0], self.latent_channels * 4, x_sample.shape[-2] // 2, x_sample.shape[-1] // 2)
+        return x_sample
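
encode() packs each 2x2 spatial block into the channel dimension ([B, 32, H, W] -> [B, 128, H//2, W//2], matching Flux2's packed latent layout), and decode() reverses it. A minimal standalone sketch showing the two reshape/permute chains are exact inverses:

import torch

x = torch.randn(1, 32, 64, 64)  # taef2 encoder output: [B, C, H, W]
packed = x.reshape(1, 32, 32, 2, 32, 2).permute(0, 1, 3, 5, 2, 4).reshape(1, 128, 32, 32)
unpacked = packed.reshape(1, 32, 2, 2, 32, 32).permute(0, 1, 4, 2, 5, 3).reshape(1, 32, 64, 64)
assert torch.equal(unpacked, x)  # lossless round trip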

=== File 5 of 9: comfy_api/latest/_io.py ===

@@ -1146,6 +1146,20 @@ class ImageCompare(ComfyTypeI):
         def as_dict(self):
             return super().as_dict()

+@comfytype(io_type="COLOR")
+class Color(ComfyTypeIO):
+    Type = str
+
+    class Input(WidgetInput):
+        def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None,
+                     socketless: bool=True, advanced: bool=None, default: str="#ffffff"):
+            super().__init__(id, display_name, optional, tooltip, None, default, socketless, None, None, None, None, advanced)
+            self.default: str
+
+        def as_dict(self):
+            return super().as_dict()

 DYNAMIC_INPUT_LOOKUP: dict[str, Callable[[dict[str, Any], dict[str, Any], tuple[str, dict[str, Any]], str, list[str] | None], None]] = {}

 def register_dynamic_input_func(io_type: str, func: Callable[[dict[str, Any], dict[str, Any], tuple[str, dict[str, Any]], str, list[str] | None], None]):
     DYNAMIC_INPUT_LOOKUP[io_type] = func
@@ -1252,23 +1266,6 @@ class NodeInfoV1:
     price_badge: dict | None = None
     search_aliases: list[str]=None

-@dataclass
-class NodeInfoV3:
-    input: dict=None
-    output: dict=None
-    hidden: list[str]=None
-    name: str=None
-    display_name: str=None
-    description: str=None
-    python_module: Any = None
-    category: str=None
-    output_node: bool=None
-    deprecated: bool=None
-    experimental: bool=None
-    dev_only: bool=None
-    api_node: bool=None
-    price_badge: dict | None = None

 @dataclass
 class PriceBadgeDepends:
@@ -1497,40 +1494,6 @@ class Schema:
             )
         return info

-    def get_v3_info(self, cls) -> NodeInfoV3:
-        input_dict = {}
-        output_dict = {}
-        hidden_list = []
-        # TODO: make sure dynamic types will be handled correctly
-        if self.inputs:
-            for input in self.inputs:
-                add_to_dict_v3(input, input_dict)
-        if self.outputs:
-            for output in self.outputs:
-                add_to_dict_v3(output, output_dict)
-        if self.hidden:
-            for hidden in self.hidden:
-                hidden_list.append(hidden.value)
-        info = NodeInfoV3(
-            input=input_dict,
-            output=output_dict,
-            hidden=hidden_list,
-            name=self.node_id,
-            display_name=self.display_name,
-            description=self.description,
-            category=self.category,
-            output_node=self.is_output_node,
-            deprecated=self.is_deprecated,
-            experimental=self.is_experimental,
-            dev_only=self.is_dev_only,
-            api_node=self.is_api_node,
-            python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
-            price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
-        )
-        return info

 def get_finalized_class_inputs(d: dict[str, Any], live_inputs: dict[str, Any], include_hidden=False) -> tuple[dict[str, Any], V3Data]:
     out_dict = {
         "required": {},
@@ -1585,9 +1548,6 @@ def add_to_dict_v1(i: Input, d: dict):
     as_dict.pop("optional", None)
     d.setdefault(key, {})[i.id] = (i.get_io_type(), as_dict)

-def add_to_dict_v3(io: Input | Output, d: dict):
-    d[io.id] = (io.get_io_type(), io.as_dict())

 class DynamicPathsDefaultValue:
     EMPTY_DICT = "empty_dict"
@@ -1748,13 +1708,6 @@ class _ComfyNodeBaseInternal(_ComfyNodeInternal):
         # set hidden
         type_clone.hidden = HiddenHolder.from_v3_data(v3_data)
         return type_clone

-    @final
-    @classmethod
-    def GET_NODE_INFO_V3(cls) -> dict[str, Any]:
-        schema = cls.GET_SCHEMA()
-        info = schema.get_v3_info(cls)
-        return asdict(info)

     #############################################
     # V1 Backwards Compatibility code
     #--------------------------------------------
@@ -2099,6 +2052,7 @@ __all__ = [
     "AnyType",
     "MultiType",
     "Tracks",
+    "Color",
     # Dynamic Types
     "MatchType",
     "DynamicCombo",
@@ -2107,12 +2061,10 @@ __all__ = [
     "HiddenHolder",
     "Hidden",
     "NodeInfoV1",
-    "NodeInfoV3",
     "Schema",
     "ComfyNode",
     "NodeOutput",
     "add_to_dict_v1",
-    "add_to_dict_v3",
     "V3Data",
     "ImageCompare",
     "PriceBadgeDepends",

=== File 6 of 9: comfy_api_nodes/apis/recraft.py ===

@@ -1,11 +1,8 @@
 from __future__ import annotations

 from enum import Enum
-from typing import Optional

-from pydantic import BaseModel, Field, conint, confloat
+from pydantic import BaseModel, Field

 class RecraftColor:
@@ -229,24 +226,24 @@ class RecraftColorObject(BaseModel):

 class RecraftControlsObject(BaseModel):
-    colors: Optional[list[RecraftColorObject]] = Field(None, description='An array of preferable colors')
-    background_color: Optional[RecraftColorObject] = Field(None, description='Use given color as a desired background color')
-    no_text: Optional[bool] = Field(None, description='Do not embed text layouts')
-    artistic_level: Optional[conint(ge=0, le=5)] = Field(None, description='Defines artistic tone of your image. At a simple level, the person looks straight at the camera in a static and clean style. Dynamic and eccentric levels introduce movement and creativity. The value should be in range [0..5].')
+    colors: list[RecraftColorObject] | None = Field(None, description='An array of preferable colors')
+    background_color: RecraftColorObject | None = Field(None, description='Use given color as a desired background color')
+    no_text: bool | None = Field(None, description='Do not embed text layouts')
+    artistic_level: int | None = Field(None, description='Defines artistic tone of your image. At a simple level, the person looks straight at the camera in a static and clean style. Dynamic and eccentric levels introduce movement and creativity. The value should be in range [0..5].')

 class RecraftImageGenerationRequest(BaseModel):
     prompt: str = Field(..., description='The text prompt describing the image to generate')
-    size: Optional[RecraftImageSize] = Field(None, description='The size of the generated image (e.g., "1024x1024")')
-    n: conint(ge=1, le=6) = Field(..., description='The number of images to generate')
-    negative_prompt: Optional[str] = Field(None, description='A text description of undesired elements on an image')
-    model: Optional[RecraftModel] = Field(RecraftModel.recraftv3, description='The model to use for generation (e.g., "recraftv3")')
-    style: Optional[str] = Field(None, description='The style to apply to the generated image (e.g., "digital_illustration")')
-    substyle: Optional[str] = Field(None, description='The substyle to apply to the generated image, depending on the style input')
-    controls: Optional[RecraftControlsObject] = Field(None, description='A set of custom parameters to tweak generation process')
-    style_id: Optional[str] = Field(None, description='Use a previously uploaded style as a reference; UUID')
-    strength: Optional[confloat(ge=0.0, le=1.0)] = Field(None, description='Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity')
-    random_seed: Optional[int] = Field(None, description="Seed for video generation")
+    size: RecraftImageSize | None = Field(None, description='The size of the generated image (e.g., "1024x1024")')
+    n: int = Field(..., description='The number of images to generate')
+    negative_prompt: str | None = Field(None, description='A text description of undesired elements on an image')
+    model: RecraftModel | None = Field(RecraftModel.recraftv3, description='The model to use for generation (e.g., "recraftv3")')
+    style: str | None = Field(None, description='The style to apply to the generated image (e.g., "digital_illustration")')
+    substyle: str | None = Field(None, description='The substyle to apply to the generated image, depending on the style input')
+    controls: RecraftControlsObject | None = Field(None, description='A set of custom parameters to tweak generation process')
+    style_id: str | None = Field(None, description='Use a previously uploaded style as a reference; UUID')
+    strength: float | None = Field(None, description='Defines the difference with the original image, should lie in [0, 1], where 0 means almost identical, and 1 means miserable similarity')
+    random_seed: int | None = Field(None, description="Seed for video generation")
     # text_layout
@@ -258,5 +255,13 @@ class RecraftReturnedObject(BaseModel):

 class RecraftImageGenerationResponse(BaseModel):
     created: int = Field(..., description='Unix timestamp when the generation was created')
     credits: int = Field(..., description='Number of credits used for the generation')
-    data: Optional[list[RecraftReturnedObject]] = Field(None, description='Array of generated image information')
-    image: Optional[RecraftReturnedObject] = Field(None, description='Single generated image')
+    data: list[RecraftReturnedObject] | None = Field(None, description='Array of generated image information')
+    image: RecraftReturnedObject | None = Field(None, description='Single generated image')
+
+class RecraftCreateStyleRequest(BaseModel):
+    style: str = Field(..., description="realistic_image, digital_illustration, vector_illustration, or icon")
+
+class RecraftCreateStyleResponse(BaseModel):
+    id: str = Field(..., description="UUID of the created style")
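
A minimal sketch of the new request model in use (assumes pydantic v2's model_dump; the style string is one of the four values listed in the field description):

req = RecraftCreateStyleRequest(style="digital_illustration")
print(req.model_dump())  # {'style': 'digital_illustration'}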

=== File 7 of 9: comfy_api_nodes/nodes_recraft.py ===

@@ -12,6 +12,8 @@ from comfy_api_nodes.apis.recraft import (
     RecraftColor,
     RecraftColorChain,
     RecraftControls,
+    RecraftCreateStyleRequest,
+    RecraftCreateStyleResponse,
     RecraftImageGenerationRequest,
     RecraftImageGenerationResponse,
     RecraftImageSize,
@@ -323,6 +325,75 @@ class RecraftStyleInfiniteStyleLibrary(IO.ComfyNode):
         return IO.NodeOutput(RecraftStyle(style_id=style_id))

+class RecraftCreateStyleNode(IO.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return IO.Schema(
+            node_id="RecraftCreateStyleNode",
+            display_name="Recraft Create Style",
+            category="api node/image/Recraft",
+            description="Create a custom style from reference images. "
+                        "Upload 1-5 images to use as style references. "
+                        "Total size of all images is limited to 5 MB.",
+            inputs=[
+                IO.Combo.Input(
+                    "style",
+                    options=["realistic_image", "digital_illustration"],
+                    tooltip="The base style of the generated images.",
+                ),
+                IO.Autogrow.Input(
+                    "images",
+                    template=IO.Autogrow.TemplatePrefix(
+                        IO.Image.Input("image"),
+                        prefix="image",
+                        min=1,
+                        max=5,
+                    ),
+                ),
+            ],
+            outputs=[
+                IO.String.Output(display_name="style_id"),
+            ],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
+            price_badge=IO.PriceBadge(
+                expr="""{"type":"usd","usd": 0.04}""",
+            ),
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        style: str,
+        images: IO.Autogrow.Type,
+    ) -> IO.NodeOutput:
+        files = []
+        total_size = 0
+        max_total_size = 5 * 1024 * 1024  # 5 MB limit
+        for i, img in enumerate(list(images.values())):
+            file_bytes = tensor_to_bytesio(img, total_pixels=2048 * 2048, mime_type="image/webp").read()
+            total_size += len(file_bytes)
+            if total_size > max_total_size:
+                raise Exception("Total size of all images exceeds 5 MB limit.")
+            files.append((f"file{i + 1}", file_bytes))
+        response = await sync_op(
+            cls,
+            endpoint=ApiEndpoint(path="/proxy/recraft/styles", method="POST"),
+            response_model=RecraftCreateStyleResponse,
+            files=files,
+            data=RecraftCreateStyleRequest(style=style),
+            content_type="multipart/form-data",
+            max_retries=1,
+        )
+        return IO.NodeOutput(response.id)

 class RecraftTextToImageNode(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
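
In the new node above, the Autogrow input hands execute() a dict of up to five image tensors (image1..image5), which are re-encoded as WebP and posted as multipart fields file1..fileN alongside the style string. A rough sketch of the equivalent direct call against Recraft's API (hedged: the host/path shown here are assumptions, and the node itself goes through ComfyUI's sync_op proxy instead):

import requests

resp = requests.post(
    "https://external.api.recraft.ai/v1/styles",           # assumed public endpoint
    headers={"Authorization": "Bearer <RECRAFT_API_KEY>"},  # placeholder credential
    data={"style": "digital_illustration"},
    files={"file1": open("ref1.webp", "rb"), "file2": open("ref2.webp", "rb")},
)
print(resp.json()["id"])  # UUID to feed into style_id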
@@ -395,7 +466,7 @@ class RecraftTextToImageNode(IO.ComfyNode):
         negative_prompt: str = None,
         recraft_controls: RecraftControls = None,
     ) -> IO.NodeOutput:
-        validate_string(prompt, strip_whitespace=False, max_length=1000)
+        validate_string(prompt, strip_whitespace=False, min_length=1, max_length=1000)
         default_style = RecraftStyle(RecraftStyleV3.realistic_image)
         if recraft_style is None:
             recraft_style = default_style
@@ -1024,6 +1095,7 @@ class RecraftExtension(ComfyExtension):
             RecraftStyleV3DigitalIllustrationNode,
             RecraftStyleV3LogoRasterNode,
             RecraftStyleInfiniteStyleLibrary,
+            RecraftCreateStyleNode,
             RecraftColorRGBNode,
             RecraftControlsNode,
         ]

=== File 8 of 9: comfy_extras/nodes_color.py (new file) ===

@@ -0,0 +1,42 @@
+from typing_extensions import override
+
+from comfy_api.latest import ComfyExtension, io
+
+class ColorToRGBInt(io.ComfyNode):
+    @classmethod
+    def define_schema(cls) -> io.Schema:
+        return io.Schema(
+            node_id="ColorToRGBInt",
+            display_name="Color to RGB Int",
+            category="utils",
+            description="Convert a color to a RGB integer value.",
+            inputs=[
+                io.Color.Input("color"),
+            ],
+            outputs=[
+                io.Int.Output(display_name="rgb_int"),
+            ],
+        )
+
+    @classmethod
+    def execute(
+        cls,
+        color: str,
+    ) -> io.NodeOutput:
+        # expect format #RRGGBB
+        if len(color) != 7 or color[0] != "#":
+            raise ValueError("Color must be in format #RRGGBB")
+        r = int(color[1:3], 16)
+        g = int(color[3:5], 16)
+        b = int(color[5:7], 16)
+        return io.NodeOutput(r * 256 * 256 + g * 256 + b)
+
+class ColorExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [ColorToRGBInt]
+
+async def comfy_entrypoint() -> ColorExtension:
+    return ColorExtension()
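
The packing in execute() is the standard hex-to-int conversion, r*65536 + g*256 + b, which equals int(color[1:], 16). A quick check:

color = "#ff8000"
r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)
assert r * 256 * 256 + g * 256 + b == int(color[1:], 16) == 16744448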

=== File 9 of 9: nodes.py ===

@@ -724,7 +724,7 @@ class LoraLoaderModelOnly(LoraLoader):

 class VAELoader:
     video_taes = ["taehv", "lighttaew2_2", "lighttaew2_1", "lighttaehy1_5", "taeltx_2"]
-    image_taes = ["taesd", "taesdxl", "taesd3", "taef1"]
+    image_taes = ["taesd", "taesdxl", "taesd3", "taef1", "taef2"]

     @staticmethod
     def vae_list(s):
         vaes = folder_paths.get_filename_list("vae")
@@ -737,6 +737,8 @@ class VAELoader:
         sd3_taesd_dec = False
         f1_taesd_enc = False
         f1_taesd_dec = False
+        f2_taesd_enc = False
+        f2_taesd_dec = False

         for v in approx_vaes:
             if v.startswith("taesd_decoder."):
@@ -755,6 +757,10 @@ class VAELoader:
                 f1_taesd_dec = True
             elif v.startswith("taef1_decoder."):
                 f1_taesd_enc = True
+            elif v.startswith("taef2_encoder."):
+                f2_taesd_dec = True
+            elif v.startswith("taef2_decoder."):
+                f2_taesd_enc = True
             else:
                 for tae in s.video_taes:
                     if v.startswith(tae):
@@ -768,6 +774,8 @@ class VAELoader:
             vaes.append("taesd3")
         if f1_taesd_dec and f1_taesd_enc:
             vaes.append("taef1")
+        if f2_taesd_dec and f2_taesd_enc:
+            vaes.append("taef2")
         vaes.append("pixel_space")
         return vaes
@@ -799,6 +807,9 @@ class VAELoader:
         elif name == "taef1":
             sd["vae_scale"] = torch.tensor(0.3611)
             sd["vae_shift"] = torch.tensor(0.1159)
+        elif name == "taef2":
+            sd["vae_scale"] = torch.tensor(1.0)
+            sd["vae_shift"] = torch.tensor(0.0)
         return sd

     @classmethod
@@ -2432,7 +2443,8 @@ async def init_builtin_extra_nodes():
         "nodes_wanmove.py",
         "nodes_image_compare.py",
         "nodes_zimage.py",
-        "nodes_lora_debug.py"
+        "nodes_lora_debug.py",
+        "nodes_color.py"
     ]

     import_failed = []