diff --git a/.github/workflows/openapi-lint.yml b/.github/workflows/openapi-lint.yml new file mode 100644 index 000000000..be949de2a --- /dev/null +++ b/.github/workflows/openapi-lint.yml @@ -0,0 +1,31 @@ +name: OpenAPI Lint + +on: + pull_request: + paths: + - 'openapi.yaml' + - '.spectral.yaml' + - '.github/workflows/openapi-lint.yml' + +permissions: + contents: read + +jobs: + spectral: + name: Run Spectral + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install Spectral + run: npm install -g @stoplight/spectral-cli@6 + + - name: Lint openapi.yaml + run: spectral lint openapi.yaml --ruleset .spectral.yaml --fail-severity=error diff --git a/.spectral.yaml b/.spectral.yaml new file mode 100644 index 000000000..4bb4a4a94 --- /dev/null +++ b/.spectral.yaml @@ -0,0 +1,91 @@ +extends: + - spectral:oas + +# Severity levels: error, warn, info, hint, off +# Rules from the built-in "spectral:oas" ruleset are active by default. +# Below we tune severity and add custom rules for our conventions. +# +# This ruleset mirrors Comfy-Org/cloud/.spectral.yaml so specs across the +# organization are linted against a single consistent standard. + +rules: + # ----------------------------------------------------------------------- + # Built-in rule severity overrides + # ----------------------------------------------------------------------- + operation-operationId: error + operation-description: warn + operation-tag-defined: error + info-contact: off + info-description: warn + no-eval-in-markdown: error + no-$ref-siblings: error + + # ----------------------------------------------------------------------- + # Custom rules: naming conventions + # ----------------------------------------------------------------------- + + # Property names should be snake_case + property-name-snake-case: + description: Property names must be snake_case + severity: warn + given: "$.components.schemas.*.properties[*]~" + then: + function: pattern + functionOptions: + match: "^[a-z][a-z0-9]*(_[a-z0-9]+)*$" + + # Operation IDs should be camelCase + operation-id-camel-case: + description: Operation IDs must be camelCase + severity: warn + given: "$.paths.*.*.operationId" + then: + function: pattern + functionOptions: + match: "^[a-z][a-zA-Z0-9]*$" + + # ----------------------------------------------------------------------- + # Custom rules: response conventions + # ----------------------------------------------------------------------- + + # Error responses (4xx, 5xx) should use a consistent shape + error-response-schema: + description: Error responses should reference a standard error schema + severity: hint + given: "$.paths.*.*.responses[?(@property >= '400' && @property < '600')].content['application/json'].schema" + then: + field: "$ref" + function: truthy + + # All 2xx responses with JSON body should have a schema + response-schema-defined: + description: Success responses with JSON content should define a schema + severity: warn + given: "$.paths.*.*.responses[?(@property >= '200' && @property < '300')].content['application/json']" + then: + field: schema + function: truthy + + # ----------------------------------------------------------------------- + # Custom rules: best practices + # ----------------------------------------------------------------------- + + # Path parameters must have a description + path-param-description: + description: Path parameters should have a description 
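+    # Illustrative (hypothetical) spec fragment this rule would flag:
+    #   parameters:
+    #     - name: user_id
+    #       in: path        # missing `description:` -> reported at warn severity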
+ severity: warn + given: + - "$.paths.*.parameters[?(@.in == 'path')]" + - "$.paths.*.*.parameters[?(@.in == 'path')]" + then: + field: description + function: truthy + + # Schemas should have a description + schema-description: + description: Component schemas should have a description + severity: hint + given: "$.components.schemas.*" + then: + field: description + function: truthy diff --git a/app/user_manager.py b/app/user_manager.py index e18afb71b..0517b3344 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -28,8 +28,8 @@ def get_file_info(path: str, relative_to: str) -> FileInfo: return { "path": os.path.relpath(path, relative_to).replace(os.sep, '/'), "size": os.path.getsize(path), - "modified": os.path.getmtime(path), - "created": os.path.getctime(path) + "modified": int(os.path.getmtime(path) * 1000), + "created": int(os.path.getctime(path) * 1000), } diff --git a/comfy/context_windows.py b/comfy/context_windows.py index cb44ee6e8..db57537a2 100644 --- a/comfy/context_windows.py +++ b/comfy/context_windows.py @@ -63,7 +63,11 @@ class IndexListContextWindow(ContextWindowABC): dim = self.dim if dim == 0 and full.shape[dim] == 1: return full - idx = tuple([slice(None)] * dim + [self.index_list]) + indices = self.index_list + anchor_idx = getattr(self, 'causal_anchor_index', None) + if anchor_idx is not None and anchor_idx >= 0: + indices = [anchor_idx] + list(indices) + idx = tuple([slice(None)] * dim + [indices]) window = full[idx] if retain_index_list: idx = tuple([slice(None)] * dim + [retain_index_list]) @@ -113,7 +117,14 @@ def slice_cond(cond_value, window: IndexListContextWindow, x_in: torch.Tensor, d # skip leading latent positions that have no corresponding conditioning (e.g. reference frames) if temporal_offset > 0: - indices = [i - temporal_offset for i in window.index_list[temporal_offset:]] + anchor_idx = getattr(window, 'causal_anchor_index', None) + if anchor_idx is not None and anchor_idx >= 0: + # anchor occupies one of the no-cond positions, so skip one fewer from window.index_list + skip_count = temporal_offset - 1 + else: + skip_count = temporal_offset + + indices = [i - temporal_offset for i in window.index_list[skip_count:]] indices = [i for i in indices if 0 <= i] else: indices = list(window.index_list) @@ -150,7 +161,8 @@ class ContextFuseMethod: ContextResults = collections.namedtuple("ContextResults", ['window_idx', 'sub_conds_out', 'sub_conds', 'window']) class IndexListContextHandler(ContextHandlerABC): def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMethod, context_length: int=1, context_overlap: int=0, context_stride: int=1, - closed_loop: bool=False, dim:int=0, freenoise: bool=False, cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False): + closed_loop: bool=False, dim:int=0, freenoise: bool=False, cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False, + causal_window_fix: bool=True): self.context_schedule = context_schedule self.fuse_method = fuse_method self.context_length = context_length @@ -162,6 +174,7 @@ class IndexListContextHandler(ContextHandlerABC): self.freenoise = freenoise self.cond_retain_index_list = [int(x.strip()) for x in cond_retain_index_list.split(",")] if cond_retain_index_list else [] self.split_conds_to_windows = split_conds_to_windows + self.causal_window_fix = causal_window_fix self.callbacks = {} @@ -318,6 +331,14 @@ class IndexListContextHandler(ContextHandlerABC): # allow processing to end between context window executions for faster Cancel 
comfy.model_management.throw_exception_if_processing_interrupted() + # causal_window_fix: prepend a pre-window frame that will be stripped post-forward + anchor_applied = False + if self.causal_window_fix: + anchor_idx = window.index_list[0] - 1 + if 0 <= anchor_idx < x_in.size(self.dim): + window.causal_anchor_index = anchor_idx + anchor_applied = True + for callback in comfy.patcher_extension.get_all_callbacks(IndexListCallbacks.EVALUATE_CONTEXT_WINDOWS, self.callbacks): callback(self, model, x_in, conds, timestep, model_options, window_idx, window, model_options, device, first_device) @@ -332,6 +353,12 @@ class IndexListContextHandler(ContextHandlerABC): if device is not None: for i in range(len(sub_conds_out)): sub_conds_out[i] = sub_conds_out[i].to(x_in.device) + + # strip causal_window_fix anchor if applied + if anchor_applied: + for i in range(len(sub_conds_out)): + sub_conds_out[i] = sub_conds_out[i].narrow(self.dim, 1, sub_conds_out[i].shape[self.dim] - 1) + results.append(ContextResults(window_idx, sub_conds_out, sub_conds, window)) return results diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 3dac5be18..91bebed3d 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -9,6 +9,7 @@ class LatentFormat: latent_rgb_factors_reshape = None taesd_decoder_name = None spacial_downscale_ratio = 8 + temporal_downscale_ratio = 1 def process_in(self, latent): return latent * self.scale_factor @@ -235,6 +236,7 @@ class Flux2(LatentFormat): class Mochi(LatentFormat): latent_channels = 12 latent_dimensions = 3 + temporal_downscale_ratio = 6 def __init__(self): self.scale_factor = 1.0 @@ -278,6 +280,7 @@ class LTXV(LatentFormat): latent_channels = 128 latent_dimensions = 3 spacial_downscale_ratio = 32 + temporal_downscale_ratio = 8 def __init__(self): self.latent_rgb_factors = [ @@ -421,6 +424,7 @@ class LTXAV(LTXV): class HunyuanVideo(LatentFormat): latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 4 scale_factor = 0.476986 latent_rgb_factors = [ [-0.0395, -0.0331, 0.0445], @@ -447,6 +451,7 @@ class HunyuanVideo(LatentFormat): class Cosmos1CV8x8x8(LatentFormat): latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 8 latent_rgb_factors = [ [ 0.1817, 0.2284, 0.2423], @@ -472,6 +477,7 @@ class Cosmos1CV8x8x8(LatentFormat): class Wan21(LatentFormat): latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 4 latent_rgb_factors = [ [-0.1299, -0.1692, 0.2932], @@ -734,6 +740,7 @@ class HunyuanVideo15(LatentFormat): latent_channels = 32 latent_dimensions = 3 spacial_downscale_ratio = 16 + temporal_downscale_ratio = 4 scale_factor = 1.03682 taesd_decoder_name = "lighttaehy1_5" @@ -786,8 +793,27 @@ class ZImagePixelSpace(ChromaRadiance): pass class CogVideoX(LatentFormat): + """Latent format for CogVideoX-2b (THUDM/CogVideoX-2b). + + scale_factor matches the vae/config.json scaling_factor for the 2b variant. + The 5b-class checkpoints (CogVideoX-5b, CogVideoX-1.5-5B, CogVideoX-Fun-V1.5-*) + use a different value; see CogVideoX1_5 below. + """ latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 4 def __init__(self): self.scale_factor = 1.15258426 + + +class CogVideoX1_5(CogVideoX): + """Latent format for 5b-class CogVideoX checkpoints. + + Covers THUDM/CogVideoX-5b, THUDM/CogVideoX-1.5-5B, and the CogVideoX-Fun + V1.5-5b family (including VOID inpainting). All of these have + scaling_factor=0.7 in their vae/config.json. 
Auto-selected in + supported_models.CogVideoX_T2V based on transformer hidden dim. + """ + def __init__(self): + self.scale_factor = 0.7 diff --git a/comfy/sd.py b/comfy/sd.py index 3a4e221dd..3ac5bcdfb 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -66,6 +66,7 @@ import comfy.text_encoders.longcat_image import comfy.text_encoders.qwen35 import comfy.text_encoders.ernie import comfy.text_encoders.gemma4 +import comfy.text_encoders.cogvideo import comfy.model_patcher import comfy.lora @@ -1238,6 +1239,7 @@ class CLIPType(Enum): NEWBIE = 24 FLUX2 = 25 LONGCAT_IMAGE = 26 + COGVIDEOX = 27 @@ -1442,6 +1444,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**t5xxl_detect(clip_data), clip_l=False, clip_g=False, t5=True, llama=False, dtype_llama=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer + elif clip_type == CLIPType.COGVIDEOX: + clip_target.clip = comfy.text_encoders.cogvideo.cogvideo_te(**t5xxl_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.cogvideo.CogVideoXTokenizer else: #CLIPType.MOCHI clip_target.clip = comfy.text_encoders.genmo.mochi_te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.genmo.MochiT5Tokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 1cf1f06b4..91cb2408a 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1881,6 +1881,14 @@ class CogVideoX_T2V(supported_models_base.BASE): vae_key_prefix = ["vae."] text_encoder_key_prefix = ["text_encoders."] + def __init__(self, unet_config): + # 2b-class (dim=1920, heads=30) uses scale_factor=1.15258426. + # 5b-class (dim=3072, heads=48) — incl. CogVideoX-5b, 1.5-5B, and + # Fun-V1.5 inpainting — uses scale_factor=0.7 per vae/config.json. + if unet_config.get("num_attention_heads", 0) >= 48: + self.latent_format = latent_formats.CogVideoX1_5 + super().__init__(unet_config) + def get_model(self, state_dict, prefix="", device=None): # CogVideoX 1.5 (patch_size_t=2) has different training base dimensions for RoPE if self.unet_config.get("patch_size_t") is not None: @@ -1907,6 +1915,20 @@ class CogVideoX_I2V(CogVideoX_T2V): out = model_base.CogVideoX(self, image_to_video=True, device=device) return out +class CogVideoX_Inpaint(CogVideoX_T2V): + unet_config = { + "image_model": "cogvideox", + "in_channels": 48, + } + + def get_model(self, state_dict, prefix="", device=None): + if self.unet_config.get("patch_size_t") is not None: + self.unet_config.setdefault("sample_height", 96) + self.unet_config.setdefault("sample_width", 170) + self.unet_config.setdefault("sample_frames", 81) + out = model_base.CogVideoX(self, image_to_video=True, device=device) + return out + models = [ LotusD, @@ -1988,6 +2010,7 @@ models = [ ErnieImage, SAM3, SAM31, + CogVideoX_Inpaint, CogVideoX_I2V, CogVideoX_T2V, SVD_img2vid, diff --git a/comfy/text_encoders/cogvideo.py b/comfy/text_encoders/cogvideo.py index f1e8e3f5d..b97310709 100644 --- a/comfy/text_encoders/cogvideo.py +++ b/comfy/text_encoders/cogvideo.py @@ -1,6 +1,48 @@ import comfy.text_encoders.sd3_clip +from comfy import sd1_clip class CogVideoXT5Tokenizer(comfy.text_encoders.sd3_clip.T5XXLTokenizer): + """Inner T5 tokenizer for CogVideoX. + + CogVideoX was trained with T5 embeddings padded to 226 tokens (not 77 like SD3). + Used both directly by supported_models.CogVideoX_T2V.clip_target (paired with + the raw T5XXLModel) and by the CogVideoXTokenizer outer wrapper below. 
+ """ def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, min_length=226) + + +class CogVideoXTokenizer(sd1_clip.SD1Tokenizer): + """Outer tokenizer wrapper for CLIPLoader (type="cogvideox").""" + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, + clip_name="t5xxl", tokenizer=CogVideoXT5Tokenizer) + + +class CogVideoXT5XXL(sd1_clip.SD1ClipModel): + """Outer T5XXL model wrapper for CLIPLoader (type="cogvideox"). + + Wraps the raw T5XXL model in the SD1ClipModel interface so that CLIP.__init__ + (which reads self.dtypes) works correctly. The inner model is the standard + sd3_clip.T5XXLModel (no attention_mask change needed for CogVideoX). + """ + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, name="t5xxl", + clip_model=comfy.text_encoders.sd3_clip.T5XXLModel, + model_options=model_options) + + +def cogvideo_te(dtype_t5=None, t5_quantization_metadata=None): + """Factory that returns a CogVideoXT5XXL class configured with the detected + T5 dtype and optional quantization metadata, for use in load_text_encoder_state_dicts. + """ + class CogVideoXTEModel_(CogVideoXT5XXL): + def __init__(self, device="cpu", dtype=None, model_options={}): + if t5_quantization_metadata is not None: + model_options = model_options.copy() + model_options["t5xxl_quantization_metadata"] = t5_quantization_metadata + if dtype_t5 is not None: + dtype = dtype_t5 + super().__init__(device=device, dtype=dtype, model_options=model_options) + return CogVideoXTEModel_ diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 4942ed46c..e50266bc5 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -395,7 +395,6 @@ class Combo(ComfyTypeIO): @comfytype(io_type="COMBO") class MultiCombo(ComfyTypeI): '''Multiselect Combo input (dropdown for selecting potentially more than one value).''' - # TODO: something is wrong with the serialization, frontend does not recognize it as multiselect Type = list[str] class Input(Combo.Input): def __init__(self, id: str, options: list[str], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, @@ -408,12 +407,14 @@ class MultiCombo(ComfyTypeI): self.default: list[str] def as_dict(self): - to_return = super().as_dict() | prune_dict({ - "multi_select": self.multiselect, - "placeholder": self.placeholder, - "chip": self.chip, + # Frontend expects `multi_select` to be an object config (not a boolean). + # Keep top-level `multiselect` from Combo.Input for backwards compatibility. 
+ return super().as_dict() | prune_dict({ + "multi_select": prune_dict({ + "placeholder": self.placeholder, + "chip": self.chip, + }), }) - return to_return @comfytype(io_type="IMAGE") class Image(ComfyTypeIO): diff --git a/comfy_api_nodes/apis/luma.py b/comfy_api_nodes/apis/luma.py index 632c4ab96..8c6db2022 100644 --- a/comfy_api_nodes/apis/luma.py +++ b/comfy_api_nodes/apis/luma.py @@ -1,15 +1,12 @@ from __future__ import annotations - -import torch - from enum import Enum from typing import Optional, Union +import torch from pydantic import BaseModel, Field, confloat - class LumaIO: LUMA_REF = "LUMA_REF" LUMA_CONCEPTS = "LUMA_CONCEPTS" @@ -183,13 +180,13 @@ class LumaAssets(BaseModel): class LumaImageRef(BaseModel): - '''Used for image gen''' + """Used for image gen""" url: str = Field(..., description='The URL of the image reference') weight: confloat(ge=0.0, le=1.0) = Field(..., description='The weight of the image reference') class LumaImageReference(BaseModel): - '''Used for video gen''' + """Used for video gen""" type: Optional[str] = Field('image', description='Input type, defaults to image') url: str = Field(..., description='The URL of the image') @@ -251,3 +248,32 @@ class LumaGeneration(BaseModel): assets: Optional[LumaAssets] = Field(None, description='The assets of the generation') model: str = Field(..., description='The model used for the generation') request: Union[LumaGenerationRequest, LumaImageGenerationRequest] = Field(..., description="The request used for the generation") + + +class Luma2ImageRef(BaseModel): + url: str | None = None + data: str | None = None + media_type: str | None = None + + +class Luma2GenerationRequest(BaseModel): + prompt: str = Field(..., min_length=1, max_length=6000) + model: str | None = None + type: str | None = None + aspect_ratio: str | None = None + style: str | None = None + output_format: str | None = None + web_search: bool | None = None + image_ref: list[Luma2ImageRef] | None = None + source: Luma2ImageRef | None = None + + +class Luma2Generation(BaseModel): + id: str | None = None + type: str | None = None + state: str | None = None + model: str | None = None + created_at: str | None = None + output: list[LumaImageReference] | None = None + failure_reason: str | None = None + failure_code: str | None = None diff --git a/comfy_api_nodes/apis/openai.py b/comfy_api_nodes/apis/openai.py index b85ef252b..bee75d639 100644 --- a/comfy_api_nodes/apis/openai.py +++ b/comfy_api_nodes/apis/openai.py @@ -56,14 +56,14 @@ class ModelResponseProperties(BaseModel): instructions: str | None = Field(None) max_output_tokens: int | None = Field(None) model: str | None = Field(None) - temperature: float | None = Field(1, description="Controls randomness in the response", ge=0.0, le=2.0) + temperature: float | None = Field(None, description="Controls randomness in the response", ge=0.0, le=2.0) top_p: float | None = Field( - 1, + None, description="Controls diversity of the response via nucleus sampling", ge=0.0, le=1.0, ) - truncation: str | None = Field("disabled", description="Allowed values: 'auto' or 'disabled'") + truncation: str | None = Field(None, description="Allowed values: 'auto' or 'disabled'") class ResponseProperties(BaseModel): diff --git a/comfy_api_nodes/nodes_luma.py b/comfy_api_nodes/nodes_luma.py index 9ed6cd299..d92a7c382 100644 --- a/comfy_api_nodes/nodes_luma.py +++ b/comfy_api_nodes/nodes_luma.py @@ -1,10 +1,11 @@ -from typing import Optional - import torch from typing_extensions import override -from comfy_api.latest import 
IO, ComfyExtension +from comfy_api.latest import IO, ComfyExtension, Input from comfy_api_nodes.apis.luma import ( + Luma2Generation, + Luma2GenerationRequest, + Luma2ImageRef, LumaAspectRatio, LumaCharacterRef, LumaConceptChain, @@ -30,6 +31,7 @@ from comfy_api_nodes.util import ( download_url_to_video_output, poll_op, sync_op, + upload_image_to_comfyapi, upload_images_to_comfyapi, validate_string, ) @@ -212,9 +214,9 @@ class LumaImageGenerationNode(IO.ComfyNode): aspect_ratio: str, seed, style_image_weight: float, - image_luma_ref: Optional[LumaReferenceChain] = None, - style_image: Optional[torch.Tensor] = None, - character_image: Optional[torch.Tensor] = None, + image_luma_ref: LumaReferenceChain | None = None, + style_image: torch.Tensor | None = None, + character_image: torch.Tensor | None = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=True, min_length=3) # handle image_luma_ref @@ -434,7 +436,7 @@ class LumaTextToVideoGenerationNode(IO.ComfyNode): duration: str, loop: bool, seed, - luma_concepts: Optional[LumaConceptChain] = None, + luma_concepts: LumaConceptChain | None = None, ) -> IO.NodeOutput: validate_string(prompt, strip_whitespace=False, min_length=3) duration = duration if model != LumaVideoModel.ray_1_6 else None @@ -533,7 +535,6 @@ class LumaImageToVideoGenerationNode(IO.ComfyNode): ], is_api_node=True, price_badge=PRICE_BADGE_VIDEO, - ) @classmethod @@ -644,6 +645,293 @@ PRICE_BADGE_VIDEO = IO.PriceBadge( ) +def _luma2_uni1_common_inputs(max_image_refs: int) -> list: + return [ + IO.Combo.Input( + "style", + options=["auto", "manga"], + default="auto", + tooltip="Style preset. 'auto' picks based on the prompt; " + "'manga' applies a manga/anime aesthetic and requires a portrait " + "aspect ratio (2:3, 9:16, 1:2, 1:3).", + ), + IO.Boolean.Input( + "web_search", + default=False, + tooltip="Search the web for visual references before generating.", + ), + IO.Autogrow.Input( + "image_ref", + template=IO.Autogrow.TemplateNames( + IO.Image.Input("image"), + names=[f"image_{i}" for i in range(1, max_image_refs + 1)], + min=0, + ), + optional=True, + tooltip=f"Up to {max_image_refs} reference images for style/content guidance.", + ), + ] + + +async def _luma2_upload_image_refs( + cls: type[IO.ComfyNode], + refs: dict | None, + max_count: int, +) -> list[Luma2ImageRef] | None: + if not refs: + return None + out: list[Luma2ImageRef] = [] + for key in refs: + url = await upload_image_to_comfyapi(cls, refs[key]) + out.append(Luma2ImageRef(url=url)) + if len(out) > max_count: + raise ValueError(f"Maximum {max_count} reference images are allowed.") + return out or None + + +async def _luma2_submit_and_poll( + cls: type[IO.ComfyNode], + request: Luma2GenerationRequest, +) -> Input.Image: + initial = await sync_op( + cls, + ApiEndpoint(path="/proxy/luma_2/generations", method="POST"), + response_model=Luma2Generation, + data=request, + ) + if not initial.id: + raise RuntimeError("Luma 2 API did not return a generation id.") + final = await poll_op( + cls, + ApiEndpoint(path=f"/proxy/luma_2/generations/{initial.id}", method="GET"), + response_model=Luma2Generation, + status_extractor=lambda r: r.state, + progress_extractor=lambda r: None, + ) + if not final.output: + msg = final.failure_reason or "no output returned" + raise RuntimeError(f"Luma 2 generation failed: {msg}") + url = final.output[0].url + if not url: + raise RuntimeError("Luma 2 generation completed without an output URL.") + return await download_url_to_image_tensor(url) + + +class 
LumaImageNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="LumaImageNode2", + display_name="Luma UNI-1 Image", + category="api node/image/Luma", + description="Generate images from text using the Luma UNI-1 model.", + inputs=[ + IO.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Text description of the desired image. 1–6000 characters.", + ), + IO.DynamicCombo.Input( + "model", + options=[ + IO.DynamicCombo.Option( + "uni-1", + [ + IO.Combo.Input( + "aspect_ratio", + options=[ + "auto", + "3:1", + "2:1", + "16:9", + "3:2", + "1:1", + "2:3", + "9:16", + "1:2", + "1:3", + ], + default="auto", + tooltip="Output image aspect ratio. 'auto' lets " + "the model pick based on the prompt.", + ), + *_luma2_uni1_common_inputs(max_image_refs=9), + ], + ), + IO.DynamicCombo.Option( + "uni-1-max", + [ + IO.Combo.Input( + "aspect_ratio", + options=[ + "auto", + "3:1", + "2:1", + "16:9", + "3:2", + "1:1", + "2:3", + "9:16", + "1:2", + "1:3", + ], + default="auto", + tooltip="Output image aspect ratio. 'auto' lets " + "the model pick based on the prompt.", + ), + *_luma2_uni1_common_inputs(max_image_refs=9), + ], + ), + ], + tooltip="Model to use for generation.", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + control_after_generate=True, + tooltip="Seed controls whether the node should re-run; " + "results are non-deterministic regardless of seed.", + ), + ], + outputs=[IO.Image.Output()], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + price_badge=IO.PriceBadge( + depends_on=IO.PriceBadgeDepends(widgets=["model"], input_groups=["model.image_ref"]), + expr=""" + ( + $m := widgets.model; + $refs := $lookup(inputGroups, "model.image_ref"); + $base := $m = "uni-1-max" ? 0.1 : 0.0404; + {"type":"usd","usd": $round($base + 0.003 * $refs, 4)} + ) + """, + ), + ) + + @classmethod + async def execute( + cls, + prompt: str, + model: dict, + seed: int, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=6000) + aspect_ratio = model["aspect_ratio"] + style = model["style"] + allowed_manga_ratios = {"2:3", "9:16", "1:2", "1:3"} + if style == "manga" and aspect_ratio != "auto" and aspect_ratio not in allowed_manga_ratios: + raise ValueError( + f"'manga' style requires a portrait aspect ratio " + f"({', '.join(sorted(allowed_manga_ratios))}) or 'auto'; got '{aspect_ratio}'." + ) + request = Luma2GenerationRequest( + prompt=prompt, + model=model["model"], + type="image", + aspect_ratio=aspect_ratio if aspect_ratio != "auto" else None, + style=style if style != "auto" else None, + output_format="png", + web_search=model["web_search"], + image_ref=await _luma2_upload_image_refs(cls, model.get("image_ref"), max_count=9), + ) + return IO.NodeOutput(await _luma2_submit_and_poll(cls, request)) + + +class LumaImageEditNode(IO.ComfyNode): + + @classmethod + def define_schema(cls) -> IO.Schema: + return IO.Schema( + node_id="LumaImageEditNode2", + display_name="Luma UNI-1 Image Edit", + category="api node/image/Luma", + description="Edit an existing image with a text prompt using the Luma UNI-1 model.", + inputs=[ + IO.Image.Input( + "source", + tooltip="Source image to edit.", + ), + IO.String.Input( + "prompt", + multiline=True, + default="", + tooltip="Description of the desired edit. 
1–6000 characters.", + ), + IO.DynamicCombo.Input( + "model", + options=[ + IO.DynamicCombo.Option( + "uni-1", + _luma2_uni1_common_inputs(max_image_refs=8), + ), + IO.DynamicCombo.Option( + "uni-1-max", + _luma2_uni1_common_inputs(max_image_refs=8), + ), + ], + tooltip="Model to use for editing.", + ), + IO.Int.Input( + "seed", + default=0, + min=0, + max=2147483647, + control_after_generate=True, + tooltip="Seed controls whether the node should re-run; " + "results are non-deterministic regardless of seed.", + ), + ], + outputs=[IO.Image.Output()], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + price_badge=IO.PriceBadge( + depends_on=IO.PriceBadgeDepends(widgets=["model"], input_groups=["model.image_ref"]), + expr=""" + ( + $m := widgets.model; + $refs := $lookup(inputGroups, "model.image_ref"); + $base := $m = "uni-1-max" ? 0.103 : 0.0434; + {"type":"usd","usd": $round($base + 0.003 * $refs, 4)} + ) + """, + ), + ) + + @classmethod + async def execute( + cls, + source: Input.Image, + prompt: str, + model: dict, + seed: int, + ) -> IO.NodeOutput: + validate_string(prompt, min_length=1, max_length=6000) + request = Luma2GenerationRequest( + prompt=prompt, + model=model["model"], + type="image_edit", + source=Luma2ImageRef(url=await upload_image_to_comfyapi(cls, source)), + style=model["style"] if model["style"] != "auto" else None, + output_format="png", + web_search=model["web_search"], + image_ref=await _luma2_upload_image_refs(cls, model.get("image_ref"), max_count=8), + ) + return IO.NodeOutput(await _luma2_submit_and_poll(cls, request)) + + class LumaExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: @@ -654,6 +942,8 @@ class LumaExtension(ComfyExtension): LumaImageToVideoGenerationNode, LumaReferenceNode, LumaConceptsNode, + LumaImageNode, + LumaImageEditNode, ] diff --git a/comfy_api_nodes/nodes_openai.py b/comfy_api_nodes/nodes_openai.py index 21fe470ce..daed495da 100644 --- a/comfy_api_nodes/nodes_openai.py +++ b/comfy_api_nodes/nodes_openai.py @@ -39,16 +39,18 @@ STARTING_POINT_ID_PATTERN = r"" class SupportedOpenAIModel(str, Enum): - o4_mini = "o4-mini" - o1 = "o1" - o3 = "o3" - o1_pro = "o1-pro" - gpt_4_1 = "gpt-4.1" - gpt_4_1_mini = "gpt-4.1-mini" - gpt_4_1_nano = "gpt-4.1-nano" + gpt_5_5_pro = "gpt-5.5-pro" + gpt_5_5 = "gpt-5.5" gpt_5 = "gpt-5" gpt_5_mini = "gpt-5-mini" gpt_5_nano = "gpt-5-nano" + gpt_4_1 = "gpt-4.1" + gpt_4_1_mini = "gpt-4.1-mini" + gpt_4_1_nano = "gpt-4.1-nano" + o4_mini = "o4-mini" + o3 = "o3" + o1_pro = "o1-pro" + o1 = "o1" async def validate_and_cast_response(response, timeout: int = None) -> torch.Tensor: @@ -739,6 +741,16 @@ class OpenAIChatNode(IO.ComfyNode): "usd": [0.002, 0.008], "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" } } + : $contains($m, "gpt-5.5-pro") ? { + "type": "list_usd", + "usd": [0.03, 0.18], + "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" } + } + : $contains($m, "gpt-5.5") ? { + "type": "list_usd", + "usd": [0.005, 0.03], + "format": { "approximate": true, "separator": "-", "suffix": " per 1K tokens" } + } : $contains($m, "gpt-5-nano") ? 
{ "type": "list_usd", "usd": [0.00005, 0.0004], diff --git a/comfy_extras/nodes_ace.py b/comfy_extras/nodes_ace.py index 1602add84..affcf3b71 100644 --- a/comfy_extras/nodes_ace.py +++ b/comfy_extras/nodes_ace.py @@ -42,7 +42,7 @@ class TextEncodeAceStepAudio15(IO.ComfyNode): IO.Int.Input("bpm", default=120, min=10, max=300), IO.Float.Input("duration", default=120.0, min=0.0, max=2000.0, step=0.1), IO.Combo.Input("timesignature", options=['2', '3', '4', '6']), - IO.Combo.Input("language", options=["en", "ja", "zh", "es", "de", "fr", "pt", "ru", "it", "nl", "pl", "tr", "vi", "cs", "fa", "id", "ko", "uk", "hu", "ar", "sv", "ro", "el"]), + IO.Combo.Input("language", options=['ar', 'az', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'fa', 'fi', 'fr', 'he', 'hi', 'hr', 'ht', 'hu', 'id', 'is', 'it', 'ja', 'ko', 'la', 'lt', 'ms', 'ne', 'nl', 'no', 'pa', 'pl', 'pt', 'ro', 'ru', 'sa', 'sk', 'sr', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'ur', 'vi', 'yue', 'zh', 'unknown'], default='en'), IO.Combo.Input("keyscale", options=[f"{root} {quality}" for quality in ["major", "minor"] for root in ["C", "C#", "Db", "D", "D#", "Eb", "E", "F", "F#", "Gb", "G", "G#", "Ab", "A", "A#", "Bb", "B"]]), IO.Boolean.Input("generate_audio_codes", default=True, tooltip="Enable the LLM that generates audio codes. This can be slow but will increase the quality of the generated audio. Turn this off if you are giving the model an audio reference.", advanced=True), IO.Float.Input("cfg_scale", default=2.0, min=0.0, max=100.0, step=0.1, advanced=True), diff --git a/comfy_extras/nodes_context_windows.py b/comfy_extras/nodes_context_windows.py index 0e43f2e44..fefc56d26 100644 --- a/comfy_extras/nodes_context_windows.py +++ b/comfy_extras/nodes_context_windows.py @@ -29,6 +29,7 @@ class ContextWindowsManualNode(io.ComfyNode): io.Boolean.Input("freenoise", default=False, tooltip="Whether to apply FreeNoise noise shuffling, improves window blending."), io.String.Input("cond_retain_index_list", default="", tooltip="List of latent indices to retain in the conditioning tensors for each window, for example setting this to '0' will use the initial start image for each window."), io.Boolean.Input("split_conds_to_windows", default=False, tooltip="Whether to split multiple conditionings (created by ConditionCombine) to each window based on region index."), + io.Boolean.Input("causal_window_fix", default=True, tooltip="Whether to add a causal fix frame to non-0-indexed context windows."), ], outputs=[ io.Model.Output(tooltip="The model with context windows applied during sampling."), @@ -38,7 +39,7 @@ class ContextWindowsManualNode(io.ComfyNode): @classmethod def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str, dim: int, freenoise: bool, - cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False) -> io.Model: + cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False, causal_window_fix: bool=True) -> io.Model: model = model.clone() model.model_options["context_handler"] = comfy.context_windows.IndexListContextHandler( context_schedule=comfy.context_windows.get_matching_context_schedule(context_schedule), @@ -50,7 +51,8 @@ class ContextWindowsManualNode(io.ComfyNode): dim=dim, freenoise=freenoise, cond_retain_index_list=cond_retain_index_list, - split_conds_to_windows=split_conds_to_windows + split_conds_to_windows=split_conds_to_windows, + causal_window_fix=causal_window_fix, ) # make 
memory usage calculation only take into account the context window latents comfy.context_windows.create_prepare_sampling_wrapper(model) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 68916ab75..1ac740d1d 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -716,7 +716,7 @@ class SplitImageToTileList(IO.ComfyNode): def get_grid_coords(width, height, tile_width, tile_height, overlap): coords = [] stride_x = round(max(tile_width * 0.25, tile_width - overlap)) - stride_y = round(max(tile_width * 0.25, tile_height - overlap)) + stride_y = round(max(tile_height * 0.25, tile_height - overlap)) y = 0 while y < height: diff --git a/comfy_extras/nodes_lt_audio.py b/comfy_extras/nodes_lt_audio.py index 3ec635c75..2c1f63afb 100644 --- a/comfy_extras/nodes_lt_audio.py +++ b/comfy_extras/nodes_lt_audio.py @@ -147,7 +147,6 @@ class LTXVEmptyLatentAudio(io.ComfyNode): z_channels = audio_vae.latent_channels audio_freq = audio_vae.first_stage_model.latent_frequency_bins - sampling_rate = int(audio_vae.first_stage_model.sample_rate) num_audio_latents = audio_vae.first_stage_model.num_of_latents_from_frames(frames_number, frame_rate) @@ -159,7 +158,6 @@ class LTXVEmptyLatentAudio(io.ComfyNode): return io.NodeOutput( { "samples": audio_latents, - "sample_rate": sampling_rate, "type": "audio", } ) diff --git a/comfy_extras/nodes_primitive.py b/comfy_extras/nodes_primitive.py index 3c8f90b19..33373266b 100644 --- a/comfy_extras/nodes_primitive.py +++ b/comfy_extras/nodes_primitive.py @@ -9,7 +9,8 @@ class String(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="PrimitiveString", - display_name="String", + search_aliases=["text", "string", "text box", "prompt"], + display_name="Text String", category="utils/primitive", inputs=[ io.String.Input("value"), @@ -27,7 +28,8 @@ class StringMultiline(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="PrimitiveStringMultiline", - display_name="String (Multiline)", + search_aliases=["text", "string", "text multiline", "string multiline", "text box", "prompt"], + display_name="Text String (Multiline)", category="utils/primitive", essentials_category="Basics", inputs=[ diff --git a/comfy_extras/nodes_string.py b/comfy_extras/nodes_string.py index 604076c4e..925a40da8 100644 --- a/comfy_extras/nodes_string.py +++ b/comfy_extras/nodes_string.py @@ -10,9 +10,9 @@ class StringConcatenate(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="StringConcatenate", - display_name="Text Concatenate", - category="utils/string", - search_aliases=["Concatenate", "text concat", "join text", "merge text", "combine strings", "concat", "concatenate", "append text", "combine text", "string"], + search_aliases=["concatenate", "text concat", "join text", "merge text", "combine strings", "string concat", "append text", "combine text"], + display_name="Concatenate Text", + category="text", inputs=[ io.String.Input("string_a", multiline=True), io.String.Input("string_b", multiline=True), @@ -33,9 +33,9 @@ class StringSubstring(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="StringSubstring", - search_aliases=["Substring", "extract text", "text portion"], - display_name="Text Substring", - category="utils/string", + search_aliases=["substring", "extract text", "text portion"], + display_name="Substring", + category="text", inputs=[ io.String.Input("string", multiline=True), io.Int.Input("start"), @@ -58,7 +58,7 @@ class StringLength(io.ComfyNode): node_id="StringLength", 
search_aliases=["character count", "text size", "string length"], display_name="Text Length", - category="utils/string", + category="text", inputs=[ io.String.Input("string", multiline=True), ], @@ -77,9 +77,9 @@ class CaseConverter(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="CaseConverter", - search_aliases=["Case Converter", "text case", "uppercase", "lowercase", "capitalize"], - display_name="Text Case Converter", - category="utils/string", + search_aliases=["case converter", "text case", "uppercase", "lowercase", "capitalize"], + display_name="Convert Text Case", + category="text", inputs=[ io.String.Input("string", multiline=True), io.Combo.Input("mode", options=["UPPERCASE", "lowercase", "Capitalize", "Title Case"]), @@ -110,9 +110,9 @@ class StringTrim(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="StringTrim", - search_aliases=["Trim", "clean whitespace", "remove whitespace", "strip"], - display_name="Text Trim", - category="utils/string", + search_aliases=["trim", "clean whitespace", "remove whitespace", "remove spaces","strip"], + display_name="Trim Text", + category="text", inputs=[ io.String.Input("string", multiline=True), io.Combo.Input("mode", options=["Both", "Left", "Right"]), @@ -141,9 +141,9 @@ class StringReplace(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="StringReplace", - search_aliases=["Replace", "find and replace", "substitute", "swap text"], - display_name="Text Replace", - category="utils/string", + search_aliases=["replace", "find and replace", "substitute", "swap text"], + display_name="Replace Text", + category="text", inputs=[ io.String.Input("string", multiline=True), io.String.Input("find", multiline=True), @@ -164,9 +164,9 @@ class StringContains(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="StringContains", - search_aliases=["Contains", "text includes", "string includes"], - display_name="Text Contains", - category="utils/string", + search_aliases=["contains", "text includes", "string includes"], + display_name="Contains Text", + category="text", inputs=[ io.String.Input("string", multiline=True), io.String.Input("substring", multiline=True), @@ -192,9 +192,9 @@ class StringCompare(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="StringCompare", - search_aliases=["Compare", "text match", "string equals", "starts with", "ends with"], - display_name="Text Compare", - category="utils/string", + search_aliases=["compare", "text match", "string equals", "starts with", "ends with"], + display_name="Compare Text", + category="text", inputs=[ io.String.Input("string_a", multiline=True), io.String.Input("string_b", multiline=True), @@ -228,9 +228,9 @@ class RegexMatch(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="RegexMatch", - search_aliases=["Regex Match", "regex", "pattern match", "text contains", "string match"], - display_name="Text Match", - category="utils/string", + search_aliases=["regex match", "regex", "pattern match", "text contains", "string match"], + display_name="Match Text", + category="text", inputs=[ io.String.Input("string", multiline=True), io.String.Input("regex_pattern", multiline=True), @@ -269,9 +269,9 @@ class RegexExtract(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="RegexExtract", - search_aliases=["Regex Extract", "regex", "pattern extract", "text parser", "parse text"], - display_name="Text Extract Substring", - category="utils/string", + search_aliases=["regex extract", "regex", "pattern 
extract", "text parser", "parse text"], + display_name="Extract Text", + category="text", inputs=[ io.String.Input("string", multiline=True), io.String.Input("regex_pattern", multiline=True), @@ -344,9 +344,9 @@ class RegexReplace(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="RegexReplace", - search_aliases=["Regex Replace", "regex", "pattern replace", "regex replace", "substitution"], - display_name="Text Replace (Regex)", - category="utils/string", + search_aliases=["regex replace", "regex", "pattern replace", "substitution"], + display_name="Replace Text (Regex)", + category="text", description="Find and replace text using regex patterns.", inputs=[ io.String.Input("string", multiline=True), @@ -381,8 +381,8 @@ class JsonExtractString(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="JsonExtractString", - display_name="Extract String from JSON", - category="utils/string", + display_name="Extract Text from JSON", + category="text", search_aliases=["json", "extract json", "parse json", "json value", "read json"], inputs=[ io.String.Input("json_string", multiline=True), diff --git a/comfy_extras/nodes_void.py b/comfy_extras/nodes_void.py new file mode 100644 index 000000000..e7a8f3757 --- /dev/null +++ b/comfy_extras/nodes_void.py @@ -0,0 +1,483 @@ +import logging + +import torch + +import comfy +import comfy.model_management +import comfy.model_patcher +import comfy.samplers +import comfy.utils +import folder_paths +import node_helpers +import nodes +from comfy.utils import model_trange as trange +from comfy_api.latest import ComfyExtension, io +from torchvision.models.optical_flow import raft_large +from typing_extensions import override + + +from comfy_extras.void_noise_warp import RaftOpticalFlow, get_noise_from_video + +OpticalFlow = io.Custom("OPTICAL_FLOW") + +TEMPORAL_COMPRESSION = 4 +PATCH_SIZE_T = 2 + + +def _valid_void_length(length: int) -> int: + """Round ``length`` down to a value that produces an even latent_t. + + VOID / CogVideoX-Fun-V1.5 uses patch_size_t=2, so the VAE-encoded latent + must have an even temporal dimension. If latent_t is odd, the transformer + pad_to_patch_size circular-wraps an extra latent frame onto the end; after + the post-transformer crop the last real latent frame has been influenced + by the wrapped phantom frame, producing visible jitter and "disappearing" + subjects near the end of the decoded video. Rounding down fixes this. + """ + latent_t = ((length - 1) // TEMPORAL_COMPRESSION) + 1 + if latent_t % PATCH_SIZE_T == 0: + return length + # Round latent_t down to the nearest multiple of PATCH_SIZE_T, then invert + # the ((length - 1) // TEMPORAL_COMPRESSION) + 1 formula. Floor at 1 frame + # so we never return a non-positive length. + target_latent_t = max(PATCH_SIZE_T, (latent_t // PATCH_SIZE_T) * PATCH_SIZE_T) + return (target_latent_t - 1) * TEMPORAL_COMPRESSION + 1 + + +class OpticalFlowLoader(io.ComfyNode): + """Load an optical flow model from ``models/optical_flow/``. + + Only torchvision's RAFT-large format is recognized today (the model used + by VOIDWarpedNoise). The checkpoint must be placed under + ``models/optical_flow/`` — ComfyUI never downloads optical-flow weights + at runtime. + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="OpticalFlowLoader", + display_name="Load Optical Flow Model", + category="loaders", + inputs=[ + io.Combo.Input( + "model_name", + options=folder_paths.get_filename_list("optical_flow"), + tooltip=( + "Optical flow model to load. 
Files must be placed in the " + "'optical_flow' folder. Today only torchvision's " + "raft_large.pth is supported." + ), + ), + ], + outputs=[ + OpticalFlow.Output(), + ], + ) + + @classmethod + def execute(cls, model_name) -> io.NodeOutput: + + model_path = folder_paths.get_full_path_or_raise("optical_flow", model_name) + sd = comfy.utils.load_torch_file(model_path, safe_load=True) + + has_raft_keys = ( + any(k.startswith("feature_encoder.") for k in sd) + and any(k.startswith("context_encoder.") for k in sd) + and any(k.startswith("update_block.") for k in sd) + ) + if not has_raft_keys: + raise ValueError( + "Unrecognized optical flow model format: expected a torchvision " + "RAFT-large state dict with 'feature_encoder.', 'context_encoder.' " + "and 'update_block.' prefixes." + ) + + model = raft_large(weights=None, progress=False) + model.load_state_dict(sd) + model.eval().to(torch.float32) + + patcher = comfy.model_patcher.ModelPatcher( + model, + load_device=comfy.model_management.get_torch_device(), + offload_device=comfy.model_management.unet_offload_device(), + ) + return io.NodeOutput(patcher) + + +class VOIDQuadmaskPreprocess(io.ComfyNode): + """Preprocess a quadmask video for VOID inpainting. + + Quantizes mask values to four semantic levels, inverts, and normalizes: + 0 -> primary object to remove + 63 -> overlap of primary + affected + 127 -> affected region (interactions) + 255 -> background (keep) + + After inversion and normalization, the output mask has values in [0, 1] + with four discrete levels: 1.0 (remove), ~0.75, ~0.50, 0.0 (keep). + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDQuadmaskPreprocess", + category="mask/video", + inputs=[ + io.Mask.Input("mask"), + io.Int.Input("dilate_width", default=0, min=0, max=50, step=1, + tooltip="Dilation radius for the primary mask region (0 = no dilation)"), + ], + outputs=[ + io.Mask.Output(display_name="quadmask"), + ], + ) + + @classmethod + def execute(cls, mask, dilate_width=0) -> io.NodeOutput: + m = mask.clone() + + if m.max() <= 1.0: + m = m * 255.0 + + if dilate_width > 0 and m.ndim >= 3: + binary = (m < 128).float() + kernel_size = dilate_width * 2 + 1 + if binary.ndim == 3: + binary = binary.unsqueeze(1) + dilated = torch.nn.functional.max_pool2d( + binary, kernel_size=kernel_size, stride=1, padding=dilate_width + ) + if dilated.ndim == 4: + dilated = dilated.squeeze(1) + m = torch.where(dilated > 0.5, torch.zeros_like(m), m) + + m = torch.where(m <= 31, torch.zeros_like(m), m) + m = torch.where((m > 31) & (m <= 95), torch.full_like(m, 63), m) + m = torch.where((m > 95) & (m <= 191), torch.full_like(m, 127), m) + m = torch.where(m > 191, torch.full_like(m, 255), m) + + m = (255.0 - m) / 255.0 + + return io.NodeOutput(m) + + +class VOIDInpaintConditioning(io.ComfyNode): + """Build VOID inpainting conditioning for CogVideoX. + + Encodes the processed quadmask and masked source video through the VAE, + producing a 32-channel concat conditioning (16ch mask + 16ch masked video) + that gets concatenated with the 16ch noise latent by the model. 
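+    Channel layout of the stored ``concat_latent_image`` (dim=1), as built in
+    ``execute`` below:
+
+        [ 0:16]  vae.encode(1 - quadmask)             # inverted mask latents
+        [16:32]  vae.encode(video * (1 - quadmask))   # masked source video latents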
+ """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDInpaintConditioning", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Image.Input("video", tooltip="Source video frames [T, H, W, 3]"), + io.Mask.Input("quadmask", tooltip="Preprocessed quadmask from VOIDQuadmaskPreprocess [T, H, W]"), + io.Int.Input("width", default=672, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=384, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("length", default=45, min=1, max=nodes.MAX_RESOLUTION, step=1, + tooltip="Number of pixel frames to process. For CogVideoX-Fun-V1.5 " + "(patch_size_t=2), latent_t must be even — lengths that " + "produce odd latent_t are rounded down (e.g. 49 → 45)."), + io.Int.Input("batch_size", default=1, min=1, max=64), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, positive, negative, vae, video, quadmask, + width, height, length, batch_size) -> io.NodeOutput: + + adjusted_length = _valid_void_length(length) + if adjusted_length != length: + logging.warning( + "VOIDInpaintConditioning: rounding length %d down to %d so that " + "latent_t is even (required by CogVideoX-Fun-V1.5 patch_size_t=2). " + "Using odd latent_t causes the last frame to be corrupted by " + "circular padding.", length, adjusted_length, + ) + length = adjusted_length + + latent_t = ((length - 1) // TEMPORAL_COMPRESSION) + 1 + latent_h = height // 8 + latent_w = width // 8 + + vid = video[:length] + vid = comfy.utils.common_upscale( + vid.movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + + qm = quadmask[:length] + if qm.ndim == 3: + qm = qm.unsqueeze(-1) + qm = comfy.utils.common_upscale( + qm.movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + if qm.ndim == 4 and qm.shape[-1] == 1: + qm = qm.squeeze(-1) + + mask_condition = qm + if mask_condition.ndim == 3: + mask_condition_3ch = mask_condition.unsqueeze(-1).expand(-1, -1, -1, 3) + else: + mask_condition_3ch = mask_condition + + inverted_mask_3ch = 1.0 - mask_condition_3ch + masked_video = vid[:, :, :, :3] * (1.0 - mask_condition_3ch) + + mask_latents = vae.encode(inverted_mask_3ch) + masked_video_latents = vae.encode(masked_video) + + def _match_temporal(lat, target_t): + if lat.shape[2] > target_t: + return lat[:, :, :target_t] + elif lat.shape[2] < target_t: + pad = target_t - lat.shape[2] + return torch.cat([lat, lat[:, :, -1:].repeat(1, 1, pad, 1, 1)], dim=2) + return lat + + mask_latents = _match_temporal(mask_latents, latent_t) + masked_video_latents = _match_temporal(masked_video_latents, latent_t) + + inpaint_latents = torch.cat([mask_latents, masked_video_latents], dim=1) + + # No explicit scaling needed here: the model's CogVideoX.concat_cond() + # applies process_latent_in (×latent_format.scale_factor) to each 16-ch + # block of the stored conditioning. For 5b-class checkpoints (incl. the + # VOID/CogVideoX-Fun-V1.5 inpainting model) that scale_factor is auto- + # selected as 0.7 in supported_models.CogVideoX_T2V, which matches the + # diffusers vae/config.json scaling_factor VOID was trained with. 
+ + positive = node_helpers.conditioning_set_values( + positive, {"concat_latent_image": inpaint_latents} + ) + negative = node_helpers.conditioning_set_values( + negative, {"concat_latent_image": inpaint_latents} + ) + + noise_latent = torch.zeros( + [batch_size, 16, latent_t, latent_h, latent_w], + device=comfy.model_management.intermediate_device() + ) + + return io.NodeOutput(positive, negative, {"samples": noise_latent}) + + +class VOIDWarpedNoise(io.ComfyNode): + """Generate optical-flow warped noise for VOID Pass 2 refinement. + + Takes the Pass 1 output video and produces temporally-correlated noise + by warping Gaussian noise along optical flow vectors. This noise is used + as the initial latent for Pass 2, resulting in better temporal consistency. + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDWarpedNoise", + category="latent/video", + inputs=[ + OpticalFlow.Input( + "optical_flow", + tooltip="Optical flow model from OpticalFlowLoader (RAFT-large).", + ), + io.Image.Input("video", tooltip="Pass 1 output video frames [T, H, W, 3]"), + io.Int.Input("width", default=672, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=384, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("length", default=45, min=1, max=nodes.MAX_RESOLUTION, step=1, + tooltip="Number of pixel frames. Rounded down to make latent_t " + "even (patch_size_t=2 requirement), e.g. 49 → 45."), + io.Int.Input("batch_size", default=1, min=1, max=64), + ], + outputs=[ + io.Latent.Output(display_name="warped_noise"), + ], + ) + + @classmethod + def execute(cls, optical_flow, video, width, height, length, batch_size) -> io.NodeOutput: + + adjusted_length = _valid_void_length(length) + if adjusted_length != length: + logging.warning( + "VOIDWarpedNoise: rounding length %d down to %d so that " + "latent_t is even (required by CogVideoX-Fun-V1.5 patch_size_t=2).", + length, adjusted_length, + ) + length = adjusted_length + + latent_t = ((length - 1) // TEMPORAL_COMPRESSION) + 1 + latent_h = height // 8 + latent_w = width // 8 + + # RAFT + noise warp is real compute, not an "intermediate" buffer, so + # we want the actual torch device (CUDA/MPS). The final latent is + # moved back to intermediate_device() before returning to match the + # rest of the ComfyUI pipeline. 
+ device = comfy.model_management.get_torch_device() + + comfy.model_management.load_model_gpu(optical_flow) + raft = RaftOpticalFlow(optical_flow.model, device=device) + + vid = video[:length].to(device) + vid = comfy.utils.common_upscale( + vid.movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + vid_uint8 = (vid.clamp(0, 1) * 255).to(torch.uint8) + + FRAME = 2**-1 + FLOW = 2**3 + LATENT_SCALE = 8 + + warped = get_noise_from_video( + vid_uint8, + raft, + noise_channels=16, + resize_frames=FRAME, + resize_flow=FLOW, + downscale_factor=round(FRAME * FLOW) * LATENT_SCALE, + device=device, + ) + + if warped.shape[0] != latent_t: + indices = torch.linspace(0, warped.shape[0] - 1, latent_t, + device=device).long() + warped = warped[indices] + + if warped.shape[1] != latent_h or warped.shape[2] != latent_w: + # (T, H, W, C) → (T, C, H, W) → bilinear resize → back + warped = warped.permute(0, 3, 1, 2) + warped = torch.nn.functional.interpolate( + warped, size=(latent_h, latent_w), + mode="bilinear", align_corners=False, + ) + warped = warped.permute(0, 2, 3, 1) + + # (T, H, W, C) → (B, C, T, H, W) + warped_tensor = warped.permute(3, 0, 1, 2).unsqueeze(0) + if batch_size > 1: + warped_tensor = warped_tensor.repeat(batch_size, 1, 1, 1, 1) + + warped_tensor = warped_tensor.to(comfy.model_management.intermediate_device()) + return io.NodeOutput({"samples": warped_tensor}) + + +class Noise_FromLatent: + """Wraps a pre-computed LATENT tensor as a NOISE source.""" + def __init__(self, latent_dict): + self.seed = 0 + self._samples = latent_dict["samples"] + + def generate_noise(self, input_latent): + return self._samples.clone().cpu() + + +class VOIDWarpedNoiseSource(io.ComfyNode): + """Convert a LATENT (e.g. from VOIDWarpedNoise) into a NOISE source + for use with SamplerCustomAdvanced.""" + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDWarpedNoiseSource", + category="sampling/custom_sampling/noise", + inputs=[ + io.Latent.Input("warped_noise", + tooltip="Warped noise latent from VOIDWarpedNoise"), + ], + outputs=[io.Noise.Output()], + ) + + @classmethod + def execute(cls, warped_noise) -> io.NodeOutput: + return io.NodeOutput(Noise_FromLatent(warped_noise)) + + +class VOID_DDIM(comfy.samplers.Sampler): + """DDIM sampler for VOID inpainting models. + + VOID was trained with the diffusers CogVideoXDDIMScheduler which operates in + alpha-space (input std ≈ 1). The standard KSampler applies noise_scaling that + multiplies by sqrt(1+sigma^2) ≈ 4500x, which is incompatible with VOID's + training. This sampler skips noise_scaling and implements the DDIM update rule + directly using sigma-to-alpha conversion. 
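+    Update rule implemented in ``sample`` below (deterministic DDIM, eta=0),
+    in ComfyUI's sigma parameterization:
+
+        alpha  = 1 / (1 + sigma**2)
+        eps    = (x - sqrt(alpha) * denoised) / sqrt(1 - alpha)
+        x_next = sqrt(alpha_next) * denoised + sqrt(1 - alpha_next) * eps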
+ """ + + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + x = noise.to(torch.float32) + model_options = extra_args.get("model_options", {}) + seed = extra_args.get("seed", None) + s_in = x.new_ones([x.shape[0]]) + + for i in trange(len(sigmas) - 1, disable=disable_pbar): + sigma = sigmas[i] + sigma_next = sigmas[i + 1] + + denoised = model_wrap(x, sigma * s_in, model_options=model_options, seed=seed) + + if callback is not None: + callback(i, denoised, x, len(sigmas) - 1) + + if sigma_next == 0: + x = denoised + else: + alpha_t = 1.0 / (1.0 + sigma ** 2) + alpha_prev = 1.0 / (1.0 + sigma_next ** 2) + + pred_eps = (x - (alpha_t ** 0.5) * denoised) / (1.0 - alpha_t) ** 0.5 + x = (alpha_prev ** 0.5) * denoised + (1.0 - alpha_prev) ** 0.5 * pred_eps + + return x + + +class VOIDSampler(io.ComfyNode): + """VOID DDIM sampler for use with SamplerCustom / SamplerCustomAdvanced. + + Required for VOID inpainting models. Implements the same DDIM loop that VOID + was trained with (diffusers CogVideoXDDIMScheduler), without the noise_scaling + that the standard KSampler applies. Use with RandomNoise or VOIDWarpedNoiseSource. + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDSampler", + category="sampling/custom_sampling/samplers", + inputs=[], + outputs=[io.Sampler.Output()], + ) + + @classmethod + def execute(cls) -> io.NodeOutput: + return io.NodeOutput(VOID_DDIM()) + + get_sampler = execute + + +class VOIDExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + OpticalFlowLoader, + VOIDQuadmaskPreprocess, + VOIDInpaintConditioning, + VOIDWarpedNoise, + VOIDWarpedNoiseSource, + VOIDSampler, + ] + + +async def comfy_entrypoint() -> VOIDExtension: + return VOIDExtension() diff --git a/comfy_extras/void_noise_warp.py b/comfy_extras/void_noise_warp.py new file mode 100644 index 000000000..fcc9a5f8b --- /dev/null +++ b/comfy_extras/void_noise_warp.py @@ -0,0 +1,494 @@ +""" +Optical-flow-warped noise for VOID Pass 2 refinement. + +Adapted from RyannDaGreat/CommonSource (MIT License, Ryan Burgert): + https://github.com/RyannDaGreat/CommonSource + - noise_warp.py (NoiseWarper / warp_xyωc / regaussianize / get_noise_from_video) + - raft.py (RaftOpticalFlow) + +Only the code paths that ``comfy_extras/nodes_void.py::VOIDWarpedNoise`` actually +uses (torch THWC uint8 input, no background removal, no visualization, no disk +I/O, default warp/noise params) have been inlined. External ``rp`` utilities +have been replaced with equivalents from torch.nn.functional / einops. The +RAFT optical-flow model itself is loaded offline via ``OpticalFlowLoader`` in +``nodes_void.py`` and passed into ``get_noise_from_video`` by the caller; this +module never downloads weights at runtime. +""" + +import logging +from typing import Optional + +import torch +import torch.nn.functional as F +from einops import rearrange + +import comfy.model_management + + +# --------------------------------------------------------------------------- +# Low-level torch image helpers (drop-in replacements for rp.torch_* primitives) +# --------------------------------------------------------------------------- + +def _torch_resize_chw(image, size, interp, copy=True): + """Resize a CHW tensor. + + ``size`` is either a scalar factor or a (h, w) tuple. ``interp`` is one + of ``"bilinear"``, ``"nearest"``, ``"area"``. 
When ``copy`` is False and + the requested size matches the input, returns the input tensor as is + (faster but callers must not mutate the result). + """ + if image.ndim != 3: + raise ValueError( + f"_torch_resize_chw expects a 3D CHW tensor, got shape {tuple(image.shape)}" + ) + _, in_h, in_w = image.shape + if isinstance(size, (int, float)) and not isinstance(size, bool): + new_h = max(1, int(in_h * size)) + new_w = max(1, int(in_w * size)) + else: + new_h, new_w = size + + if (new_h, new_w) == (in_h, in_w): + return image.clone() if copy else image + + kwargs = {} + if interp in ("bilinear", "bicubic"): + kwargs["align_corners"] = False + out = F.interpolate(image[None], size=(new_h, new_w), mode=interp, **kwargs)[0] + return out + + +def _torch_remap_relative(image, dx, dy, interp="bilinear"): + """Relative remap of a CHW image via ``F.grid_sample``. + + Equivalent to ``rp.torch_remap_image(image, dx, dy, relative=True, interp=interp)`` + for ``interp`` in {"bilinear", "nearest"}. Out-of-bounds samples are 0. + """ + if image.ndim != 3: + raise ValueError( + f"_torch_remap_relative expects a 3D CHW tensor, got shape {tuple(image.shape)}" + ) + if dx.shape != dy.shape: + raise ValueError( + f"_torch_remap_relative: dx and dy must match, got {tuple(dx.shape)} vs {tuple(dy.shape)}" + ) + _, h, w = image.shape + + x_abs = dx + torch.arange(w, device=dx.device, dtype=dx.dtype) + y_abs = dy + torch.arange(h, device=dy.device, dtype=dy.dtype)[:, None] + + x_norm = (x_abs / (w - 1)) * 2 - 1 + y_norm = (y_abs / (h - 1)) * 2 - 1 + + grid = torch.stack([x_norm, y_norm], dim=-1)[None].to(image.dtype) + out = F.grid_sample( + image[None], grid, mode=interp, align_corners=True, padding_mode="zeros" + )[0] + return out + + +def _torch_scatter_add_relative(image, dx, dy): + """Scatter-add a CHW image using relative floor-rounded (dx, dy) offsets. + + Equivalent to ``rp.torch_scatter_add_image(image, dx, dy, relative=True, + interp='floor')``. Out-of-bounds targets are dropped. + """ + if image.ndim != 3: + raise ValueError( + f"_torch_scatter_add_relative expects a 3D CHW tensor, got shape {tuple(image.shape)}" + ) + in_c, in_h, in_w = image.shape + if dx.shape != (in_h, in_w) or dy.shape != (in_h, in_w): + raise ValueError( + f"_torch_scatter_add_relative: dx/dy must be ({in_h}, {in_w}), " + f"got dx={tuple(dx.shape)} dy={tuple(dy.shape)}" + ) + + x = dx.long() + torch.arange(in_w, device=dx.device, dtype=torch.long) + y = dy.long() + torch.arange(in_h, device=dy.device, dtype=torch.long)[:, None] + + valid = ((y >= 0) & (y < in_h) & (x >= 0) & (x < in_w)).reshape(-1) + indices = (y * in_w + x).reshape(-1)[valid] + + flat_image = rearrange(image, "c h w -> (h w) c")[valid] + out = torch.zeros((in_h * in_w, in_c), dtype=image.dtype, device=image.device) + out.index_add_(0, indices, flat_image) + return rearrange(out, "(h w) c -> c h w", h=in_h, w=in_w) + + +# --------------------------------------------------------------------------- +# Noise warping primitives (ported from noise_warp.py) +# --------------------------------------------------------------------------- + +def unique_pixels(image): + """Find unique pixel values in a CHW tensor. + + Returns ``(unique_colors [U, C], counts [U], index_matrix [H, W])`` where + ``index_matrix[i, j]`` is the index of the unique color at that pixel. 
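+
+    For example (illustrative values), a 1-channel 2x2 image
+    ``[[5., 5.], [7., 5.]]`` yields ``unique_colors == [[5.], [7.]]`` (in
+    some order, since ``sorted=False``), ``counts == [3, 1]``, and an index
+    matrix mapping each pixel to its color's row in ``unique_colors``.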
+ """ + _, h, w = image.shape + flat = rearrange(image, "c h w -> (h w) c") + unique_colors, inverse_indices, counts = torch.unique( + flat, dim=0, return_inverse=True, return_counts=True, sorted=False, + ) + index_matrix = rearrange(inverse_indices, "(h w) -> h w", h=h, w=w) + return unique_colors, counts, index_matrix + + +def sum_indexed_values(image, index_matrix): + """For each unique index, sum the CHW image values at its pixels.""" + _, h, w = image.shape + u = int(index_matrix.max().item()) + 1 + flat = rearrange(image, "c h w -> (h w) c") + out = torch.zeros((u, flat.shape[1]), dtype=flat.dtype, device=flat.device) + out.index_add_(0, index_matrix.view(-1), flat) + return out + + +def indexed_to_image(index_matrix, unique_colors): + """Build a CHW image from an index matrix and a (U, C) color table.""" + h, w = index_matrix.shape + flat = unique_colors[index_matrix.view(-1)] + return rearrange(flat, "(h w) c -> c h w", h=h, w=w) + + +def regaussianize(noise): + """Variance-preserving re-sampling of a CHW noise tensor. + + Wherever the noise contains groups of identical pixel values (e.g. after + a nearest-neighbor warp that duplicated source pixels), adds zero-mean + foreign noise within each group and scales by ``1/sqrt(count)`` so the + output is unit-variance gaussian again. + """ + _, hs, ws = noise.shape + _, counts, index_matrix = unique_pixels(noise[:1]) + + foreign_noise = torch.randn_like(noise) + summed = sum_indexed_values(foreign_noise, index_matrix) + meaned = indexed_to_image(index_matrix, summed / rearrange(counts, "u -> u 1")) + zeroed_foreign = foreign_noise - meaned + + counts_image = indexed_to_image(index_matrix, rearrange(counts, "u -> u 1")) + + output = noise / counts_image ** 0.5 + zeroed_foreign + return output, counts_image + + +def xy_meshgrid_like_image(image): + """Return a (2, H, W) tensor of (x, y) pixel coordinates matching ``image``.""" + _, h, w = image.shape + y, x = torch.meshgrid( + torch.arange(h, device=image.device, dtype=image.dtype), + torch.arange(w, device=image.device, dtype=image.dtype), + indexing="ij", + ) + return torch.stack([x, y]) + + +def noise_to_state(noise): + """Pack a (C, H, W) noise tensor into a state tensor (3+C, H, W) = [dx, dy, ω, noise].""" + zeros = torch.zeros_like(noise[:1]) + ones = torch.ones_like(noise[:1]) + return torch.cat([zeros, zeros, ones, noise]) + + +def state_to_noise(state): + """Unpack the noise channels from a state tensor.""" + return state[3:] + + +def warp_state(state, flow): + """Warp a noise-warper state tensor along the given optical flow. + + ``state`` has shape ``(3+c, h, w)`` (= dx, dy, ω, c noise channels). + ``flow`` has shape ``(2, h, w)`` (= dx, dy). 
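+
+    Shape-only sketch (zero flow, purely illustrative)::
+
+        state = noise_to_state(torch.randn(16, 64, 64))  # (3 + 16, 64, 64)
+        flow = torch.zeros(2, 64, 64)
+        state = warp_state(state, flow)                  # still (19, 64, 64)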
+ """ + if flow.device != state.device: + raise ValueError( + f"warp_state: flow and state must be on the same device, " + f"got flow={flow.device} state={state.device}" + ) + if state.ndim != 3: + raise ValueError( + f"warp_state: state must be 3D (3+C, H, W), got shape {tuple(state.shape)}" + ) + xyoc, h, w = state.shape + if flow.shape != (2, h, w): + raise ValueError( + f"warp_state: flow must have shape (2, {h}, {w}), got {tuple(flow.shape)}" + ) + device = state.device + + x_ch, y_ch = 0, 1 + xy = 2 # state[:xy] = [dx, dy] + xyw = 3 # state[:xyw] = [dx, dy, ω] + w_ch = 2 # state[w_ch] = ω + c = xyoc - xyw + oc = xyoc - xy + if c <= 0: + raise ValueError( + f"warp_state: state has no noise channels (expected 3+C with C>0, got {xyoc} channels)" + ) + if not (state[w_ch] > 0).all(): + raise ValueError("warp_state: all weights in state[2] must be > 0") + + grid = xy_meshgrid_like_image(state) + + init = torch.empty_like(state) + init[:xy] = 0 + init[w_ch] = 1 + init[-c:] = 0 + + # --- Expansion branch: nearest-neighbor remap with negated flow --- + pre_expand = torch.empty_like(state) + pre_expand[:xy] = _torch_remap_relative(state[:xy], -flow[0], -flow[1], "nearest") + pre_expand[-oc:] = _torch_remap_relative(state[-oc:], -flow[0], -flow[1], "nearest") + pre_expand[w_ch][pre_expand[w_ch] == 0] = 1 + + # --- Shrink branch: scatter-add state into new positions --- + pre_shrink = state.clone() + pre_shrink[:xy] += flow + + pos = (grid + pre_shrink[:xy]).round() + in_bounds = (pos[x_ch] >= 0) & (pos[x_ch] < w) & (pos[y_ch] >= 0) & (pos[y_ch] < h) + pre_shrink = torch.where(~in_bounds[None], init, pre_shrink) + + scat_xy = pre_shrink[:xy].round() + pre_shrink[:xy] -= scat_xy + pre_shrink[:xy] = 0 # xy_mode='none' in upstream + + def scat(tensor): + return _torch_scatter_add_relative(tensor, scat_xy[0], scat_xy[1]) + + # rp.torch_scatter_add_image on a bool tensor errors on modern torch; + # scatter-sum a float ones tensor and threshold to get the mask instead. + shrink_mask = scat(torch.ones(1, h, w, dtype=state.dtype, device=device)) > 0 + + # Drop expansion samples at positions that will be filled by shrink. + pre_expand = torch.where(shrink_mask, init, pre_expand) + + # Regaussianize both branches together so duplicated-source groups are + # counted globally, then split back apart. + concat = torch.cat([pre_shrink, pre_expand], dim=2) # along width + concat[-c:], counts_image = regaussianize(concat[-c:]) + concat[w_ch] = concat[w_ch] / counts_image[0] + concat[w_ch] = concat[w_ch].nan_to_num() + pre_shrink, expand = torch.chunk(concat, chunks=2, dim=2) + + shrink = torch.empty_like(pre_shrink) + shrink[w_ch] = scat(pre_shrink[w_ch][None])[0] + shrink[:xy] = scat(pre_shrink[:xy] * pre_shrink[w_ch][None]) / shrink[w_ch][None] + shrink[-c:] = scat(pre_shrink[-c:] * pre_shrink[w_ch][None]) / scat( + pre_shrink[w_ch][None] ** 2 + ).sqrt() + + output = torch.where(shrink_mask, shrink, expand) + output[w_ch] = output[w_ch] / output[w_ch].mean() + output[w_ch] += 1e-5 + output[w_ch] **= 0.9999 + return output + + +class NoiseWarper: + """Maintain a warpable noise state and emit gaussian noise per frame. + + Simplified from RyannDaGreat/CommonSource/noise_warp.py::NoiseWarper: + ``scale_factor``, ``post_noise_alpha``, ``progressive_noise_alpha``, and + ``warp_kwargs`` are all dropped since VOIDWarpedNoise always uses defaults. 
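+
+    Typical per-frame usage, assuming ``flow`` is a ``(2, H, W)`` tensor from
+    ``RaftOpticalFlow``::
+
+        device = comfy.model_management.get_torch_device()
+        warper = NoiseWarper(c=16, h=480, w=720, device=device)
+        first = warper.noise              # (16, 480, 720) gaussian noise
+        warper(flow[0], flow[1])          # advance the state by one frame
+        second = warper.noise             # warped, still ~unit variance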
+ """ + + def __init__(self, c, h, w, device, dtype=torch.float32): + if c <= 0 or h <= 0 or w <= 0: + raise ValueError( + f"NoiseWarper: c/h/w must all be positive, got c={c} h={h} w={w}" + ) + self.c = c + self.h = h + self.w = w + self.device = device + self.dtype = dtype + + noise = torch.randn(c, h, w, dtype=dtype, device=device) + self._state = noise_to_state(noise) + + @property + def noise(self): + # With scale_factor=1 the "downsample to respect weights" step is a + # size-preserving no-op; the weight-variance correction math still + # runs to stay faithful to upstream. + n = state_to_noise(self._state) + weights = self._state[2:3] + return n * weights / (weights ** 2).sqrt() + + def __call__(self, dx, dy): + if dx.shape != dy.shape: + raise ValueError( + f"NoiseWarper: dx and dy must match, got {tuple(dx.shape)} vs {tuple(dy.shape)}" + ) + flow = torch.stack([dx, dy]).to(self.device, self.dtype) + _, oflowh, ofloww = flow.shape + + flow = _torch_resize_chw(flow, (self.h, self.w), "bilinear", copy=True) + flowh, floww = flow.shape[-2:] + + # Upstream scales flow[0] by flowh/oflowh and flow[1] by floww/ofloww + # (channel-order appears swapped but harmless when H and W are scaled + # by the same factor, which is always the case for our callers). + flow[0] *= flowh / oflowh + flow[1] *= floww / ofloww + + self._state = warp_state(self._state, flow) + return self + + +# --------------------------------------------------------------------------- +# RAFT optical flow wrapper (ported from raft.py) +# --------------------------------------------------------------------------- + +class RaftOpticalFlow: + """RAFT-large wrapper around a pre-loaded torchvision model. + + ``model`` must be the ``torchvision.models.optical_flow.raft_large`` module + with its weights already populated; this class is load-agnostic so the + caller owns downloading/offload concerns (see ``OpticalFlowLoader`` in + ``nodes_void.py``). ``__call__`` returns a ``(2, H, W)`` flow. 
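+
+    Example, assuming ``raft_model`` is the module produced by
+    ``OpticalFlowLoader``::
+
+        raft = RaftOpticalFlow(raft_model)
+        flow = raft(frame_a, frame_b)  # frames: (3, H, W) floats in [0, 1]
+        dx, dy = flow[0], flow[1]      # flow: (2, H, W)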
+ """ + + def __init__(self, model, device=None): + if device is None: + device = comfy.model_management.get_torch_device() + device = torch.device(device) if not isinstance(device, torch.device) else device + + model = model.to(device) + model.eval() + self.device = device + self.model = model + + def _preprocess(self, image_chw): + image = image_chw.to(self.device, torch.float32) + _, h, w = image.shape + new_h = (h // 8) * 8 + new_w = (w // 8) * 8 + image = _torch_resize_chw(image, (new_h, new_w), "bilinear", copy=False) + image = image * 2 - 1 + return image[None] + + def __call__(self, from_image, to_image): + """``from_image``, ``to_image``: CHW float tensors in [0, 1].""" + if from_image.shape != to_image.shape: + raise ValueError( + f"RaftOpticalFlow: from_image and to_image must match, " + f"got {tuple(from_image.shape)} vs {tuple(to_image.shape)}" + ) + _, h, w = from_image.shape + with torch.no_grad(): + img1 = self._preprocess(from_image) + img2 = self._preprocess(to_image) + list_of_flows = self.model(img1, img2) + flow = list_of_flows[-1][0] # (2, new_h, new_w) + if flow.shape[-2:] != (h, w): + flow = _torch_resize_chw(flow, (h, w), "bilinear", copy=False) + return flow + + +# --------------------------------------------------------------------------- +# Narrow entry point used by VOIDWarpedNoise +# --------------------------------------------------------------------------- + +def get_noise_from_video( + video_frames: torch.Tensor, + raft: RaftOpticalFlow, + *, + noise_channels: int = 16, + resize_frames: float = 0.5, + resize_flow: int = 8, + downscale_factor: int = 32, + device: Optional[torch.device] = None, +) -> torch.Tensor: + """Produce optical-flow-warped gaussian noise from a video. + + Args: + video_frames: ``(T, H, W, 3)`` uint8 torch tensor. + raft: Pre-loaded RAFT optical-flow wrapper (see ``RaftOpticalFlow``). + noise_channels: Channels in the output noise. + resize_frames: Pre-RAFT frame scale factor. + resize_flow: Post-flow up-scale factor applied to the optical flow; + the internal noise state is allocated at + ``(resize_flow * resize_frames * H, resize_flow * resize_frames * W)``. + downscale_factor: Area-pool factor applied to the noise before return; + should evenly divide the internal noise resolution. + device: Target device. Defaults to ``comfy.model_management.get_torch_device()``. + + Returns: + ``(T, H', W', noise_channels)`` float32 noise tensor on ``device``. + """ + if not isinstance(resize_flow, int) or resize_flow < 1: + raise ValueError( + f"get_noise_from_video: resize_flow must be a positive int, got {resize_flow!r}" + ) + if video_frames.ndim != 4 or video_frames.shape[-1] != 3: + raise ValueError( + "get_noise_from_video: video_frames must have shape (T, H, W, 3), " + f"got {tuple(video_frames.shape)}" + ) + if video_frames.dtype != torch.uint8: + raise TypeError( + "get_noise_from_video: video_frames must be uint8 in [0, 255], " + f"got dtype {video_frames.dtype}" + ) + + if device is None: + device = comfy.model_management.get_torch_device() + device = torch.device(device) if not isinstance(device, torch.device) else device + + if device.type == "cpu": + logging.warning( + "VOIDWarpedNoise: running get_noise_from_video on CPU; this will be " + "slow (minutes for ~45 frames). Use CUDA for interactive use." 
+ ) + + T = video_frames.shape[0] + frames = video_frames.to(device).permute(0, 3, 1, 2).to(torch.float32) / 255.0 + if resize_frames != 1.0: + new_h = max(1, int(frames.shape[2] * resize_frames)) + new_w = max(1, int(frames.shape[3] * resize_frames)) + frames = F.interpolate(frames, size=(new_h, new_w), mode="area") + + _, _, H, W = frames.shape + internal_h = resize_flow * H + internal_w = resize_flow * W + if internal_h % downscale_factor or internal_w % downscale_factor: + logging.warning( + "VOIDWarpedNoise: internal noise size %dx%d is not divisible by " + "downscale_factor %d; output noise may have artifacts.", + internal_h, internal_w, downscale_factor, + ) + + with torch.no_grad(): + warper = NoiseWarper( + c=noise_channels, h=internal_h, w=internal_w, device=device, + ) + down_h = warper.h // downscale_factor + down_w = warper.w // downscale_factor + output = torch.empty( + (T, down_h, down_w, noise_channels), dtype=torch.float32, device=device, + ) + + def downscale(noise_chw): + # Area-pool to 1/downscale_factor then multiply by downscale_factor + # to adjust std (sqrt of pool area == downscale_factor for a + # square pool). + down = _torch_resize_chw(noise_chw, 1.0 / downscale_factor, "area", copy=False) + return down * downscale_factor + + output[0] = downscale(warper.noise).permute(1, 2, 0) + + prev = frames[0] + for i in range(1, T): + curr = frames[i] + flow = raft(prev, curr).to(device) + warper(flow[0], flow[1]) + output[i] = downscale(warper.noise).permute(1, 2, 0) + prev = curr + + return output diff --git a/execution.py b/execution.py index 654db8426..f37d0360d 100644 --- a/execution.py +++ b/execution.py @@ -1019,7 +1019,12 @@ async def validate_inputs(prompt_id, prompt, item, validated, visiting=None): combo_options = extra_info.get("options", []) else: combo_options = input_type - if val not in combo_options: + is_multiselect = extra_info.get("multiselect", False) + if is_multiselect and isinstance(val, list): + invalid_vals = [v for v in val if v not in combo_options] + else: + invalid_vals = [val] if val not in combo_options else [] + if invalid_vals: input_config = info list_info = "" @@ -1034,7 +1039,7 @@ async def validate_inputs(prompt_id, prompt, item, validated, visiting=None): error = { "type": "value_not_in_list", "message": "Value not in list", - "details": f"{x}: '{val}' not in {list_info}", + "details": f"{x}: {', '.join(repr(v) for v in invalid_vals)} not in {list_info}", "extra_info": { "input_name": x, "input_config": input_config, diff --git a/folder_paths.py b/folder_paths.py index 80f4b291a..98d3b1880 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -54,6 +54,8 @@ folder_names_and_paths["audio_encoders"] = ([os.path.join(models_dir, "audio_enc folder_names_and_paths["frame_interpolation"] = ([os.path.join(models_dir, "frame_interpolation")], supported_pt_extensions) +folder_names_and_paths["optical_flow"] = ([os.path.join(models_dir, "optical_flow")], supported_pt_extensions) + output_directory = os.path.join(base_path, "output") temp_directory = os.path.join(base_path, "temp") input_directory = os.path.join(base_path, "input") @@ -432,7 +434,9 @@ def get_save_image_path(filename_prefix: str, output_dir: str, image_width=0, im prefix_len = len(os.path.basename(filename_prefix)) prefix = filename[:prefix_len + 1] try: - digits = int(filename[prefix_len + 1:].split('_')[0]) + remainder = filename[prefix_len + 1:] + base_remainder = remainder.split('.')[0] + digits = int(base_remainder.split('_')[0]) except: digits = 0 return digits, prefix 
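The ``get_save_image_path`` change above fixes counter parsing when the counter
is immediately followed by the file extension rather than a trailing
underscore. A minimal before/after sketch (filenames are hypothetical):

    # filename_prefix = "ComfyUI", existing file "ComfyUI_00003.png"
    remainder = "00003.png"                      # filename[prefix_len + 1:]

    int(remainder.split('_')[0])                 # old: ValueError -> digits = 0
    int(remainder.split('.')[0].split('_')[0])   # new: 3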
diff --git a/models/optical_flow/put_optical_flow_models_here b/models/optical_flow/put_optical_flow_models_here new file mode 100644 index 000000000..e69de29bb diff --git a/nodes.py b/nodes.py index 1e41b2ae0..ad0cbc675 100644 --- a/nodes.py +++ b/nodes.py @@ -958,7 +958,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis", "longcat_image"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis", "longcat_image", "cogvideox"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -968,7 +968,7 @@ class CLIPLoader: CATEGORY = "advanced/loaders" - DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B" + DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncogvideox: t5 xxl (226-token padding)\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B" def load_clip(self, clip_name, type="stable_diffusion", device="default"): clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) @@ -2262,7 +2262,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom logging.warning(f"Error while calling comfy_entrypoint in {module_path}: {e}") return False else: - logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).") + logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or comfy_entrypoint (need one).") return False except Exception as e: logging.warning(traceback.format_exc()) @@ -2430,6 +2430,7 @@ async def init_builtin_extra_nodes(): "nodes_rtdetr.py", "nodes_frame_interpolation.py", "nodes_sam3.py", + "nodes_void.py", ] import_failed = [] diff --git a/openapi.yaml b/openapi.yaml index 30f85b6ad..29b5f544b 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -631,7 +631,7 @@ paths: operationId: getFeatures tags: [system] summary: Get enabled feature flags - description: Returns a dictionary of feature flag names to their enabled state. + description: Returns a dictionary of feature flag names to their enabled state. Cloud deployments may include additional typed fields alongside the boolean flags. responses: "200": description: Feature flags @@ -641,6 +641,43 @@ paths: type: object additionalProperties: type: boolean + properties: + max_upload_size: + type: integer + format: int64 + minimum: 0 + description: "Maximum file upload size in bytes." + free_tier_credits: + type: integer + format: int32 + minimum: 0 + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Credits available to free-tier users. Local ComfyUI returns null." 
+ posthog_api_host: + type: string + format: uri + nullable: true + x-runtime: [cloud] + description: "[cloud-only] PostHog analytics proxy URL for frontend telemetry. Local ComfyUI returns null." + max_concurrent_jobs: + type: integer + format: int32 + minimum: 0 + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Maximum concurrent jobs the authenticated user can run. Local ComfyUI returns null." + workflow_templates_version: + type: string + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Version identifier for the workflow templates bundle. Local ComfyUI returns null." + workflow_templates_source: + type: string + nullable: true + enum: [dynamic_config_override, workflow_templates_version_json] + x-runtime: [cloud] + description: "[cloud-only] How the templates version was resolved. Local ComfyUI returns null." # --------------------------------------------------------------------------- # Node / Object Info @@ -1497,6 +1534,24 @@ paths: type: string enum: [asc, desc] description: Sort direction + - name: job_ids + in: query + schema: + type: string + x-runtime: [cloud] + description: "[cloud-only] Comma-separated UUIDs to filter assets by associated job." + - name: include_public + in: query + schema: + type: boolean + x-runtime: [cloud] + description: "[cloud-only] Include workspace-public assets in addition to the caller's own." + - name: asset_hash + in: query + schema: + type: string + x-runtime: [cloud] + description: "[cloud-only] Filter by exact content hash." responses: "200": description: Asset list @@ -1542,6 +1597,49 @@ paths: type: string format: uuid description: ID of an existing asset to use as the preview image + id: + type: string + format: uuid + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Client-supplied asset ID for idempotent creation. If an asset with this ID already exists, the existing asset is returned." + application/json: + schema: + type: object + x-runtime: [cloud] + description: "[cloud-only] URL-based asset upload. Caller supplies a URL instead of a file body; the server fetches the content." + required: + - url + properties: + url: + type: string + format: uri + description: "[cloud-only] URL of the file to import as an asset" + name: + type: string + description: Display name for the asset + tags: + type: string + description: Comma-separated tags + user_metadata: + type: string + description: JSON-encoded user metadata + hash: + type: string + description: "Blake3 hash of the file content (e.g. blake3:abc123...)" + mime_type: + type: string + description: MIME type of the file (overrides auto-detected type) + preview_id: + type: string + format: uuid + description: ID of an existing asset to use as the preview image + id: + type: string + format: uuid + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Client-supplied asset ID for idempotent creation. If an asset with this ID already exists, the existing asset is returned." responses: "201": description: Asset created @@ -1580,6 +1678,11 @@ paths: user_metadata: type: object additionalProperties: true + mime_type: + type: string + nullable: true + x-runtime: [cloud] + description: "[cloud-only] MIME type of the content, so the type is preserved without re-inspecting content. Ignored by local ComfyUI." 
responses: "201": description: Asset created from hash @@ -1644,6 +1747,11 @@ paths: type: string format: uuid description: ID of the asset to use as the preview + mime_type: + type: string + nullable: true + x-runtime: [cloud] + description: "[cloud-only] MIME type override when auto-detection was wrong. Ignored by local ComfyUI." responses: "200": description: Asset updated @@ -2004,21 +2112,13 @@ components: format: uuid nullable: true x-runtime: [cloud] - description: | - UUID identifying a hosted-cloud workflow entity to associate with this - job. Local ComfyUI doesn't track workflow entities and returns `null` - (or omits the field). The `x-runtime: [cloud]` extension marks this - as populated only by the hosted-cloud runtime; absence of the tag - means a field is populated by all runtimes. + description: "[cloud-only] Cloud workflow entity ID for tracking and gallery association. Ignored by local ComfyUI." workflow_version_id: type: string format: uuid nullable: true x-runtime: [cloud] - description: | - UUID identifying a hosted-cloud workflow version to associate with - this job. Local ComfyUI returns `null` (or omits the field). See - `workflow_id` above for `x-runtime` semantics. + description: "[cloud-only] Cloud workflow version ID for pinning execution to a specific version. Ignored by local ComfyUI." PromptResponse: type: object diff --git a/requirements.txt b/requirements.txt index 32826e25a..e7aa92c31 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ -comfyui-frontend-package==1.42.15 -comfyui-workflow-templates==0.9.68 +comfyui-frontend-package==1.43.17 +comfyui-workflow-templates==0.9.69 comfyui-embedded-docs==0.4.4 torch torchsde diff --git a/server.py b/server.py index 2f3b438bb..0e85635d3 100644 --- a/server.py +++ b/server.py @@ -560,7 +560,7 @@ class PromptServer(): buffer.seek(0) return web.Response(body=buffer.read(), content_type=f'image/{image_format}', - headers={"Content-Disposition": f"filename=\"{filename}\""}) + headers={"Content-Disposition": f"attachment; filename=\"{filename}\""}) if 'channel' not in request.rel_url.query: channel = 'rgba' @@ -580,7 +580,7 @@ class PromptServer(): buffer.seek(0) return web.Response(body=buffer.read(), content_type='image/png', - headers={"Content-Disposition": f"filename=\"{filename}\""}) + headers={"Content-Disposition": f"attachment; filename=\"{filename}\""}) elif channel == 'a': with Image.open(file) as img: @@ -597,7 +597,7 @@ class PromptServer(): alpha_buffer.seek(0) return web.Response(body=alpha_buffer.read(), content_type='image/png', - headers={"Content-Disposition": f"filename=\"{filename}\""}) + headers={"Content-Disposition": f"attachment; filename=\"{filename}\""}) else: # Use the content type from asset resolution if available, # otherwise guess from the filename. 
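The header change repeated in these hunks brings the responses in line with
RFC 6266: a ``Content-Disposition`` value must begin with a disposition type,
so a bare ``filename=...`` parameter is malformed. A minimal sketch of the
corrected pattern (hypothetical handler, same aiohttp calls this file already
uses):

    from aiohttp import web

    async def download(request: web.Request) -> web.Response:
        # "attachment" is the disposition type; without it the header carries
        # only a parameter, which clients may reject or ignore.
        return web.Response(
            body=b"...",
            headers={"Content-Disposition": 'attachment; filename="output.png"'},
        )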
@@ -614,7 +614,7 @@ class PromptServer(): return web.FileResponse( file, headers={ - "Content-Disposition": f"filename=\"{filename}\"", + "Content-Disposition": f"attachment; filename=\"{filename}\"", "Content-Type": content_type } ) diff --git a/tests-unit/comfy_api_test/multicombo_serialization_test.py b/tests-unit/comfy_api_test/multicombo_serialization_test.py new file mode 100644 index 000000000..421c65a0d --- /dev/null +++ b/tests-unit/comfy_api_test/multicombo_serialization_test.py @@ -0,0 +1,78 @@ +from comfy_api.latest._io import Combo, MultiCombo + + +def test_multicombo_serializes_multi_select_as_object(): + multi_combo = MultiCombo.Input( + id="providers", + options=["a", "b", "c"], + default=["a"], + ) + + serialized = multi_combo.as_dict() + + assert serialized["multiselect"] is True + assert "multi_select" in serialized + assert serialized["multi_select"] == {} + + +def test_multicombo_serializes_multi_select_with_placeholder_and_chip(): + multi_combo = MultiCombo.Input( + id="providers", + options=["a", "b", "c"], + default=["a"], + placeholder="Select providers", + chip=True, + ) + + serialized = multi_combo.as_dict() + + assert serialized["multiselect"] is True + assert serialized["multi_select"] == { + "placeholder": "Select providers", + "chip": True, + } + + +def test_combo_does_not_serialize_multiselect(): + """Regular Combo should not have multiselect in its serialized output.""" + combo = Combo.Input( + id="choice", + options=["a", "b", "c"], + ) + + serialized = combo.as_dict() + + # Combo sets multiselect=False, but prune_dict keeps False (not None), + # so it should be present but False + assert serialized.get("multiselect") is False + assert "multi_select" not in serialized + + +def _validate_combo_values(val, combo_options, is_multiselect): + """Reproduce the validation logic from execution.py for testing.""" + if is_multiselect and isinstance(val, list): + return [v for v in val if v not in combo_options] + else: + return [val] if val not in combo_options else [] + + +def test_multicombo_validation_accepts_valid_list(): + options = ["a", "b", "c"] + assert _validate_combo_values(["a", "b"], options, True) == [] + + +def test_multicombo_validation_rejects_invalid_values(): + options = ["a", "b", "c"] + assert _validate_combo_values(["a", "x"], options, True) == ["x"] + + +def test_multicombo_validation_accepts_empty_list(): + options = ["a", "b", "c"] + assert _validate_combo_values([], options, True) == [] + + +def test_combo_validation_rejects_list_even_with_valid_items(): + """A regular Combo should not accept a list value.""" + options = ["a", "b", "c"] + invalid = _validate_combo_values(["a", "b"], options, False) + assert len(invalid) > 0 diff --git a/tests-unit/prompt_server_test/user_manager_test.py b/tests-unit/prompt_server_test/user_manager_test.py index b939d8e68..27118400f 100644 --- a/tests-unit/prompt_server_test/user_manager_test.py +++ b/tests-unit/prompt_server_test/user_manager_test.py @@ -69,7 +69,11 @@ async def test_listuserdata_full_info(aiohttp_client, app, tmp_path): assert len(result) == 1 assert result[0]["path"] == "file1.txt" assert "size" in result[0] - assert "modified" in result[0] + assert isinstance(result[0]["modified"], int) + assert isinstance(result[0]["created"], int) + # Verify millisecond magnitude (timestamps after year 2000 in ms are > 946684800000) + assert result[0]["modified"] > 946684800000 + assert result[0]["created"] > 946684800000 async def test_listuserdata_split_path(aiohttp_client, app, tmp_path):
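For reference, the magic number in the new assertions is just
2000-01-01T00:00:00Z expressed in epoch milliseconds; a standalone snippet
(not part of the test suite) to verify it:

    from datetime import datetime, timezone

    # 2000-01-01 UTC is 946684800 seconds after the epoch, i.e.
    # 946684800000 ms, so any post-2000 millisecond timestamp exceeds it.
    threshold_ms = int(datetime(2000, 1, 1, tzinfo=timezone.utc).timestamp() * 1000)
    assert threshold_ms == 946684800000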