From 1ac60da2c9c8f83654204b2a1db13908cf7614f7 Mon Sep 17 00:00:00 2001 From: Matt Miller Date: Tue, 5 May 2026 13:21:36 -0700 Subject: [PATCH 01/25] Add Spectral lint CI gate for openapi.yaml (#13410) * Add Spectral lint CI gate for openapi.yaml Adds a blocking Spectral lint check that runs on PRs touching openapi.yaml or the ruleset itself. The ruleset mirrors the one used for other Comfy-Org service specs: spectral:oas plus conventions for snake_case properties, camelCase operationIds, and response/schema shape. Gate runs at --fail-severity=error, which the spec currently passes with zero errors (a small number of non-blocking warnings/hints remain for WebSocket 101 responses, the existing loose error schema, and two snake_case wire fields). * ci: set least-privilege contents:read permissions on openapi-lint workflow Per CodeRabbit review on #13410. The job only checks out the repo and runs Spectral, so contents:read is sufficient and avoids inheriting any permissive repo/org default token scope. --------- Co-authored-by: guill --- .github/workflows/openapi-lint.yml | 31 ++++++++++ .spectral.yaml | 91 ++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 .github/workflows/openapi-lint.yml create mode 100644 .spectral.yaml diff --git a/.github/workflows/openapi-lint.yml b/.github/workflows/openapi-lint.yml new file mode 100644 index 000000000..be949de2a --- /dev/null +++ b/.github/workflows/openapi-lint.yml @@ -0,0 +1,31 @@ +name: OpenAPI Lint + +on: + pull_request: + paths: + - 'openapi.yaml' + - '.spectral.yaml' + - '.github/workflows/openapi-lint.yml' + +permissions: + contents: read + +jobs: + spectral: + name: Run Spectral + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install Spectral + run: npm install -g @stoplight/spectral-cli@6 + + - name: Lint openapi.yaml + run: spectral lint openapi.yaml --ruleset .spectral.yaml --fail-severity=error diff --git a/.spectral.yaml b/.spectral.yaml new file mode 100644 index 000000000..4bb4a4a94 --- /dev/null +++ b/.spectral.yaml @@ -0,0 +1,91 @@ +extends: + - spectral:oas + +# Severity levels: error, warn, info, hint, off +# Rules from the built-in "spectral:oas" ruleset are active by default. +# Below we tune severity and add custom rules for our conventions. +# +# This ruleset mirrors Comfy-Org/cloud/.spectral.yaml so specs across the +# organization are linted against a single consistent standard. 
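+#
+# Quick illustration of the two custom naming rules below (identifiers are
+# made up, not taken from openapi.yaml): a property key `createdAt` fails
+# property-name-snake-case and should be `created_at`; an operationId
+# `get_queue` fails operation-id-camel-case and should be `getQueue`.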
+ +rules: + # ----------------------------------------------------------------------- + # Built-in rule severity overrides + # ----------------------------------------------------------------------- + operation-operationId: error + operation-description: warn + operation-tag-defined: error + info-contact: off + info-description: warn + no-eval-in-markdown: error + no-$ref-siblings: error + + # ----------------------------------------------------------------------- + # Custom rules: naming conventions + # ----------------------------------------------------------------------- + + # Property names should be snake_case + property-name-snake-case: + description: Property names must be snake_case + severity: warn + given: "$.components.schemas.*.properties[*]~" + then: + function: pattern + functionOptions: + match: "^[a-z][a-z0-9]*(_[a-z0-9]+)*$" + + # Operation IDs should be camelCase + operation-id-camel-case: + description: Operation IDs must be camelCase + severity: warn + given: "$.paths.*.*.operationId" + then: + function: pattern + functionOptions: + match: "^[a-z][a-zA-Z0-9]*$" + + # ----------------------------------------------------------------------- + # Custom rules: response conventions + # ----------------------------------------------------------------------- + + # Error responses (4xx, 5xx) should use a consistent shape + error-response-schema: + description: Error responses should reference a standard error schema + severity: hint + given: "$.paths.*.*.responses[?(@property >= '400' && @property < '600')].content['application/json'].schema" + then: + field: "$ref" + function: truthy + + # All 2xx responses with JSON body should have a schema + response-schema-defined: + description: Success responses with JSON content should define a schema + severity: warn + given: "$.paths.*.*.responses[?(@property >= '200' && @property < '300')].content['application/json']" + then: + field: schema + function: truthy + + # ----------------------------------------------------------------------- + # Custom rules: best practices + # ----------------------------------------------------------------------- + + # Path parameters must have a description + path-param-description: + description: Path parameters should have a description + severity: warn + given: + - "$.paths.*.parameters[?(@.in == 'path')]" + - "$.paths.*.*.parameters[?(@.in == 'path')]" + then: + field: description + function: truthy + + # Schemas should have a description + schema-description: + description: Component schemas should have a description + severity: hint + given: "$.components.schemas.*" + then: + field: description + function: truthy From 431fadb520bbd2d18cbbd4067e06222301f1b4fe Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Tue, 5 May 2026 13:58:32 -0700 Subject: [PATCH 02/25] fix(api-io): serialize MultiCombo multi_select as object config (#13484) * fix(api-io): serialize MultiCombo multi_select as object config * fix: remove dead code and redundant top-level keys from MultiCombo serialization * fix: correct skip warning to mention comfy_entrypoint, remove nonexistent NODES_LIST * fix: validate MultiCombo list values against options individually * fix: gate multiselect validation on schema config, improve error message, add tests --------- Co-authored-by: Ni-zav Co-authored-by: guill --- comfy_api/latest/_io.py | 13 ++-- execution.py | 9 ++- nodes.py | 2 +- .../multicombo_serialization_test.py | 78 +++++++++++++++++++ 4 files changed, 93 insertions(+), 9 deletions(-) create mode 100644 
tests-unit/comfy_api_test/multicombo_serialization_test.py diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py index 4942ed46c..e50266bc5 100644 --- a/comfy_api/latest/_io.py +++ b/comfy_api/latest/_io.py @@ -395,7 +395,6 @@ class Combo(ComfyTypeIO): @comfytype(io_type="COMBO") class MultiCombo(ComfyTypeI): '''Multiselect Combo input (dropdown for selecting potentially more than one value).''' - # TODO: something is wrong with the serialization, frontend does not recognize it as multiselect Type = list[str] class Input(Combo.Input): def __init__(self, id: str, options: list[str], display_name: str=None, optional=False, tooltip: str=None, lazy: bool=None, @@ -408,12 +407,14 @@ class MultiCombo(ComfyTypeI): self.default: list[str] def as_dict(self): - to_return = super().as_dict() | prune_dict({ - "multi_select": self.multiselect, - "placeholder": self.placeholder, - "chip": self.chip, + # Frontend expects `multi_select` to be an object config (not a boolean). + # Keep top-level `multiselect` from Combo.Input for backwards compatibility. + return super().as_dict() | prune_dict({ + "multi_select": prune_dict({ + "placeholder": self.placeholder, + "chip": self.chip, + }), }) - return to_return @comfytype(io_type="IMAGE") class Image(ComfyTypeIO): diff --git a/execution.py b/execution.py index 654db8426..f37d0360d 100644 --- a/execution.py +++ b/execution.py @@ -1019,7 +1019,12 @@ async def validate_inputs(prompt_id, prompt, item, validated, visiting=None): combo_options = extra_info.get("options", []) else: combo_options = input_type - if val not in combo_options: + is_multiselect = extra_info.get("multiselect", False) + if is_multiselect and isinstance(val, list): + invalid_vals = [v for v in val if v not in combo_options] + else: + invalid_vals = [val] if val not in combo_options else [] + if invalid_vals: input_config = info list_info = "" @@ -1034,7 +1039,7 @@ async def validate_inputs(prompt_id, prompt, item, validated, visiting=None): error = { "type": "value_not_in_list", "message": "Value not in list", - "details": f"{x}: '{val}' not in {list_info}", + "details": f"{x}: {', '.join(repr(v) for v in invalid_vals)} not in {list_info}", "extra_info": { "input_name": x, "input_config": input_config, diff --git a/nodes.py b/nodes.py index 1e41b2ae0..cf61d9df0 100644 --- a/nodes.py +++ b/nodes.py @@ -2262,7 +2262,7 @@ async def load_custom_node(module_path: str, ignore=set(), module_parent="custom logging.warning(f"Error while calling comfy_entrypoint in {module_path}: {e}") return False else: - logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or NODES_LIST (need one).") + logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS or comfy_entrypoint (need one).") return False except Exception as e: logging.warning(traceback.format_exc()) diff --git a/tests-unit/comfy_api_test/multicombo_serialization_test.py b/tests-unit/comfy_api_test/multicombo_serialization_test.py new file mode 100644 index 000000000..421c65a0d --- /dev/null +++ b/tests-unit/comfy_api_test/multicombo_serialization_test.py @@ -0,0 +1,78 @@ +from comfy_api.latest._io import Combo, MultiCombo + + +def test_multicombo_serializes_multi_select_as_object(): + multi_combo = MultiCombo.Input( + id="providers", + options=["a", "b", "c"], + default=["a"], + ) + + serialized = multi_combo.as_dict() + + assert serialized["multiselect"] is True + assert "multi_select" in serialized + assert serialized["multi_select"] == {} + + +def 
test_multicombo_serializes_multi_select_with_placeholder_and_chip(): + multi_combo = MultiCombo.Input( + id="providers", + options=["a", "b", "c"], + default=["a"], + placeholder="Select providers", + chip=True, + ) + + serialized = multi_combo.as_dict() + + assert serialized["multiselect"] is True + assert serialized["multi_select"] == { + "placeholder": "Select providers", + "chip": True, + } + + +def test_combo_does_not_serialize_multiselect(): + """Regular Combo should not have multiselect in its serialized output.""" + combo = Combo.Input( + id="choice", + options=["a", "b", "c"], + ) + + serialized = combo.as_dict() + + # Combo sets multiselect=False, but prune_dict keeps False (not None), + # so it should be present but False + assert serialized.get("multiselect") is False + assert "multi_select" not in serialized + + +def _validate_combo_values(val, combo_options, is_multiselect): + """Reproduce the validation logic from execution.py for testing.""" + if is_multiselect and isinstance(val, list): + return [v for v in val if v not in combo_options] + else: + return [val] if val not in combo_options else [] + + +def test_multicombo_validation_accepts_valid_list(): + options = ["a", "b", "c"] + assert _validate_combo_values(["a", "b"], options, True) == [] + + +def test_multicombo_validation_rejects_invalid_values(): + options = ["a", "b", "c"] + assert _validate_combo_values(["a", "x"], options, True) == ["x"] + + +def test_multicombo_validation_accepts_empty_list(): + options = ["a", "b", "c"] + assert _validate_combo_values([], options, True) == [] + + +def test_combo_validation_rejects_list_even_with_valid_items(): + """A regular Combo should not accept a list value.""" + options = ["a", "b", "c"] + invalid = _validate_combo_values(["a", "b"], options, False) + assert len(invalid) > 0 From 89014792c966b04bf18f7ba62aee5169f9094e84 Mon Sep 17 00:00:00 2001 From: Matt Miller Date: Tue, 5 May 2026 14:20:09 -0700 Subject: [PATCH 03/25] feat: add cloud-specific fields to OSS openapi.yaml as nullable (#13623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add cloud-specific fields to OSS openapi.yaml as nullable Add cross-runtime fields with x-runtime: [cloud] extension and [cloud-only] description prefix per the convention established in BE-613. All new fields are nullable and not in required arrays, so they are purely additive. /api/features response: - max_upload_size (integer, int64) - free_tier_credits (integer, int32) - posthog_api_host (string, uri) - max_concurrent_jobs (integer, int32) - workflow_templates_version (string) - workflow_templates_source (string, enum) PromptRequest schema: - workflow_id (string, uuid) - workflow_version_id (string, uuid) POST /api/assets: - id field (uuid) on multipart/form-data for idempotent creation - application/json alternate content-type for URL-based uploads POST /api/assets/from-hash: - mime_type (string) to preserve type without re-inspection PUT /api/assets/{id}: - mime_type (string) for overriding auto-detection GET /api/assets additional query parameters: - job_ids (string) — filter by associated job UUIDs - include_public (boolean) — include workspace-public assets - asset_hash (string) — filter by exact content hash Resolves: BE-613 Blocks: BE-364, BE-361, BE-363 Co-authored-by: Matt Miller * fix(openapi): address CodeRabbit feedback (BE-613) - max_upload_size is set in both runtimes via SERVER_FEATURE_FLAGS; drop the cloud-only / nullable tagging. 
- Require `url` on the application/json POST /api/assets body so the contract is enforceable by validators and codegen. --------- Co-authored-by: Matt Miller --- openapi.yaml | 122 ++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 111 insertions(+), 11 deletions(-) diff --git a/openapi.yaml b/openapi.yaml index 30f85b6ad..29b5f544b 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -631,7 +631,7 @@ paths: operationId: getFeatures tags: [system] summary: Get enabled feature flags - description: Returns a dictionary of feature flag names to their enabled state. + description: Returns a dictionary of feature flag names to their enabled state. Cloud deployments may include additional typed fields alongside the boolean flags. responses: "200": description: Feature flags @@ -641,6 +641,43 @@ paths: type: object additionalProperties: type: boolean + properties: + max_upload_size: + type: integer + format: int64 + minimum: 0 + description: "Maximum file upload size in bytes." + free_tier_credits: + type: integer + format: int32 + minimum: 0 + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Credits available to free-tier users. Local ComfyUI returns null." + posthog_api_host: + type: string + format: uri + nullable: true + x-runtime: [cloud] + description: "[cloud-only] PostHog analytics proxy URL for frontend telemetry. Local ComfyUI returns null." + max_concurrent_jobs: + type: integer + format: int32 + minimum: 0 + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Maximum concurrent jobs the authenticated user can run. Local ComfyUI returns null." + workflow_templates_version: + type: string + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Version identifier for the workflow templates bundle. Local ComfyUI returns null." + workflow_templates_source: + type: string + nullable: true + enum: [dynamic_config_override, workflow_templates_version_json] + x-runtime: [cloud] + description: "[cloud-only] How the templates version was resolved. Local ComfyUI returns null." # --------------------------------------------------------------------------- # Node / Object Info @@ -1497,6 +1534,24 @@ paths: type: string enum: [asc, desc] description: Sort direction + - name: job_ids + in: query + schema: + type: string + x-runtime: [cloud] + description: "[cloud-only] Comma-separated UUIDs to filter assets by associated job." + - name: include_public + in: query + schema: + type: boolean + x-runtime: [cloud] + description: "[cloud-only] Include workspace-public assets in addition to the caller's own." + - name: asset_hash + in: query + schema: + type: string + x-runtime: [cloud] + description: "[cloud-only] Filter by exact content hash." responses: "200": description: Asset list @@ -1542,6 +1597,49 @@ paths: type: string format: uuid description: ID of an existing asset to use as the preview image + id: + type: string + format: uuid + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Client-supplied asset ID for idempotent creation. If an asset with this ID already exists, the existing asset is returned." + application/json: + schema: + type: object + x-runtime: [cloud] + description: "[cloud-only] URL-based asset upload. Caller supplies a URL instead of a file body; the server fetches the content." 
+ required: + - url + properties: + url: + type: string + format: uri + description: "[cloud-only] URL of the file to import as an asset" + name: + type: string + description: Display name for the asset + tags: + type: string + description: Comma-separated tags + user_metadata: + type: string + description: JSON-encoded user metadata + hash: + type: string + description: "Blake3 hash of the file content (e.g. blake3:abc123...)" + mime_type: + type: string + description: MIME type of the file (overrides auto-detected type) + preview_id: + type: string + format: uuid + description: ID of an existing asset to use as the preview image + id: + type: string + format: uuid + nullable: true + x-runtime: [cloud] + description: "[cloud-only] Client-supplied asset ID for idempotent creation. If an asset with this ID already exists, the existing asset is returned." responses: "201": description: Asset created @@ -1580,6 +1678,11 @@ paths: user_metadata: type: object additionalProperties: true + mime_type: + type: string + nullable: true + x-runtime: [cloud] + description: "[cloud-only] MIME type of the content, so the type is preserved without re-inspecting content. Ignored by local ComfyUI." responses: "201": description: Asset created from hash @@ -1644,6 +1747,11 @@ paths: type: string format: uuid description: ID of the asset to use as the preview + mime_type: + type: string + nullable: true + x-runtime: [cloud] + description: "[cloud-only] MIME type override when auto-detection was wrong. Ignored by local ComfyUI." responses: "200": description: Asset updated @@ -2004,21 +2112,13 @@ components: format: uuid nullable: true x-runtime: [cloud] - description: | - UUID identifying a hosted-cloud workflow entity to associate with this - job. Local ComfyUI doesn't track workflow entities and returns `null` - (or omits the field). The `x-runtime: [cloud]` extension marks this - as populated only by the hosted-cloud runtime; absence of the tag - means a field is populated by all runtimes. + description: "[cloud-only] Cloud workflow entity ID for tracking and gallery association. Ignored by local ComfyUI." workflow_version_id: type: string format: uuid nullable: true x-runtime: [cloud] - description: | - UUID identifying a hosted-cloud workflow version to associate with - this job. Local ComfyUI returns `null` (or omits the field). See - `workflow_id` above for `x-runtime` semantics. + description: "[cloud-only] Cloud workflow version ID for pinning execution to a specific version. Ignored by local ComfyUI." 
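+        # Illustrative request (values hypothetical): a hosted-cloud client may
+        # send {"prompt": {...}, "workflow_id": "<uuid>",
+        # "workflow_version_id": "<uuid>"}; local clients omit both fields or
+        # send null.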
PromptResponse: type: object From 1655f8089a23232a94b36129286942c33e740168 Mon Sep 17 00:00:00 2001 From: drozbay <17261091+drozbay@users.noreply.github.com> Date: Tue, 5 May 2026 17:30:00 -0600 Subject: [PATCH 04/25] Add temporal_downscale_ratio to LatentFormat (#13702) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: ozbayb <17261091+ozbayb@users.noreply.github.com> Co-authored-by: Alexis Rolland Co-authored-by: Jukka Seppänen <40791699+kijai@users.noreply.github.com> Co-authored-by: Jedrzej Kosinski --- comfy/latent_formats.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 3dac5be18..60c0dfd7e 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -9,6 +9,7 @@ class LatentFormat: latent_rgb_factors_reshape = None taesd_decoder_name = None spacial_downscale_ratio = 8 + temporal_downscale_ratio = 1 def process_in(self, latent): return latent * self.scale_factor @@ -235,6 +236,7 @@ class Flux2(LatentFormat): class Mochi(LatentFormat): latent_channels = 12 latent_dimensions = 3 + temporal_downscale_ratio = 6 def __init__(self): self.scale_factor = 1.0 @@ -278,6 +280,7 @@ class LTXV(LatentFormat): latent_channels = 128 latent_dimensions = 3 spacial_downscale_ratio = 32 + temporal_downscale_ratio = 8 def __init__(self): self.latent_rgb_factors = [ @@ -421,6 +424,7 @@ class LTXAV(LTXV): class HunyuanVideo(LatentFormat): latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 4 scale_factor = 0.476986 latent_rgb_factors = [ [-0.0395, -0.0331, 0.0445], @@ -447,6 +451,7 @@ class HunyuanVideo(LatentFormat): class Cosmos1CV8x8x8(LatentFormat): latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 8 latent_rgb_factors = [ [ 0.1817, 0.2284, 0.2423], @@ -472,6 +477,7 @@ class Cosmos1CV8x8x8(LatentFormat): class Wan21(LatentFormat): latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 4 latent_rgb_factors = [ [-0.1299, -0.1692, 0.2932], @@ -734,6 +740,7 @@ class HunyuanVideo15(LatentFormat): latent_channels = 32 latent_dimensions = 3 spacial_downscale_ratio = 16 + temporal_downscale_ratio = 4 scale_factor = 1.03682 taesd_decoder_name = "lighttaehy1_5" @@ -788,6 +795,7 @@ class ZImagePixelSpace(ChromaRadiance): class CogVideoX(LatentFormat): latent_channels = 16 latent_dimensions = 3 + temporal_downscale_ratio = 4 def __init__(self): self.scale_factor = 1.15258426 From e5369c0eec8b1b9c6d1b12a2e4167b46c7fd1c1e Mon Sep 17 00:00:00 2001 From: drozbay <17261091+drozbay@users.noreply.github.com> Date: Tue, 5 May 2026 17:40:53 -0600 Subject: [PATCH 05/25] feat: Context windows - add causal_window_fix to improve blending of context windows (CORE-100) (#13563) * Context windows: add causal_window_fix toggle * Fix slice_cond to correctly handle causal anchor index for temporal offsets --- comfy/context_windows.py | 33 ++++++++++++++++++++++++--- comfy_extras/nodes_context_windows.py | 6 +++-- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/comfy/context_windows.py b/comfy/context_windows.py index cb44ee6e8..db57537a2 100644 --- a/comfy/context_windows.py +++ b/comfy/context_windows.py @@ -63,7 +63,11 @@ class IndexListContextWindow(ContextWindowABC): dim = self.dim if dim == 0 and full.shape[dim] == 1: return full - idx = tuple([slice(None)] * dim + [self.index_list]) + indices = self.index_list + anchor_idx = getattr(self, 'causal_anchor_index', None) + if anchor_idx is not None and anchor_idx >= 0: + 
indices = [anchor_idx] + list(indices) + idx = tuple([slice(None)] * dim + [indices]) window = full[idx] if retain_index_list: idx = tuple([slice(None)] * dim + [retain_index_list]) @@ -113,7 +117,14 @@ def slice_cond(cond_value, window: IndexListContextWindow, x_in: torch.Tensor, d # skip leading latent positions that have no corresponding conditioning (e.g. reference frames) if temporal_offset > 0: - indices = [i - temporal_offset for i in window.index_list[temporal_offset:]] + anchor_idx = getattr(window, 'causal_anchor_index', None) + if anchor_idx is not None and anchor_idx >= 0: + # anchor occupies one of the no-cond positions, so skip one fewer from window.index_list + skip_count = temporal_offset - 1 + else: + skip_count = temporal_offset + + indices = [i - temporal_offset for i in window.index_list[skip_count:]] indices = [i for i in indices if 0 <= i] else: indices = list(window.index_list) @@ -150,7 +161,8 @@ class ContextFuseMethod: ContextResults = collections.namedtuple("ContextResults", ['window_idx', 'sub_conds_out', 'sub_conds', 'window']) class IndexListContextHandler(ContextHandlerABC): def __init__(self, context_schedule: ContextSchedule, fuse_method: ContextFuseMethod, context_length: int=1, context_overlap: int=0, context_stride: int=1, - closed_loop: bool=False, dim:int=0, freenoise: bool=False, cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False): + closed_loop: bool=False, dim:int=0, freenoise: bool=False, cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False, + causal_window_fix: bool=True): self.context_schedule = context_schedule self.fuse_method = fuse_method self.context_length = context_length @@ -162,6 +174,7 @@ class IndexListContextHandler(ContextHandlerABC): self.freenoise = freenoise self.cond_retain_index_list = [int(x.strip()) for x in cond_retain_index_list.split(",")] if cond_retain_index_list else [] self.split_conds_to_windows = split_conds_to_windows + self.causal_window_fix = causal_window_fix self.callbacks = {} @@ -318,6 +331,14 @@ class IndexListContextHandler(ContextHandlerABC): # allow processing to end between context window executions for faster Cancel comfy.model_management.throw_exception_if_processing_interrupted() + # causal_window_fix: prepend a pre-window frame that will be stripped post-forward + anchor_applied = False + if self.causal_window_fix: + anchor_idx = window.index_list[0] - 1 + if 0 <= anchor_idx < x_in.size(self.dim): + window.causal_anchor_index = anchor_idx + anchor_applied = True + for callback in comfy.patcher_extension.get_all_callbacks(IndexListCallbacks.EVALUATE_CONTEXT_WINDOWS, self.callbacks): callback(self, model, x_in, conds, timestep, model_options, window_idx, window, model_options, device, first_device) @@ -332,6 +353,12 @@ class IndexListContextHandler(ContextHandlerABC): if device is not None: for i in range(len(sub_conds_out)): sub_conds_out[i] = sub_conds_out[i].to(x_in.device) + + # strip causal_window_fix anchor if applied + if anchor_applied: + for i in range(len(sub_conds_out)): + sub_conds_out[i] = sub_conds_out[i].narrow(self.dim, 1, sub_conds_out[i].shape[self.dim] - 1) + results.append(ContextResults(window_idx, sub_conds_out, sub_conds, window)) return results diff --git a/comfy_extras/nodes_context_windows.py b/comfy_extras/nodes_context_windows.py index 0e43f2e44..fefc56d26 100644 --- a/comfy_extras/nodes_context_windows.py +++ b/comfy_extras/nodes_context_windows.py @@ -29,6 +29,7 @@ class ContextWindowsManualNode(io.ComfyNode): 
io.Boolean.Input("freenoise", default=False, tooltip="Whether to apply FreeNoise noise shuffling, improves window blending."), io.String.Input("cond_retain_index_list", default="", tooltip="List of latent indices to retain in the conditioning tensors for each window, for example setting this to '0' will use the initial start image for each window."), io.Boolean.Input("split_conds_to_windows", default=False, tooltip="Whether to split multiple conditionings (created by ConditionCombine) to each window based on region index."), + io.Boolean.Input("causal_window_fix", default=True, tooltip="Whether to add a causal fix frame to non-0-indexed context windows."), ], outputs=[ io.Model.Output(tooltip="The model with context windows applied during sampling."), @@ -38,7 +39,7 @@ class ContextWindowsManualNode(io.ComfyNode): @classmethod def execute(cls, model: io.Model.Type, context_length: int, context_overlap: int, context_schedule: str, context_stride: int, closed_loop: bool, fuse_method: str, dim: int, freenoise: bool, - cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False) -> io.Model: + cond_retain_index_list: list[int]=[], split_conds_to_windows: bool=False, causal_window_fix: bool=True) -> io.Model: model = model.clone() model.model_options["context_handler"] = comfy.context_windows.IndexListContextHandler( context_schedule=comfy.context_windows.get_matching_context_schedule(context_schedule), @@ -50,7 +51,8 @@ class ContextWindowsManualNode(io.ComfyNode): dim=dim, freenoise=freenoise, cond_retain_index_list=cond_retain_index_list, - split_conds_to_windows=split_conds_to_windows + split_conds_to_windows=split_conds_to_windows, + causal_window_fix=causal_window_fix, ) # make memory usage calculation only take into account the context window latents comfy.context_windows.create_prepare_sampling_wrapper(model) From c168960a12213df8123a2f234f04a4cf55bbe30d Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Tue, 5 May 2026 17:00:11 -0700 Subject: [PATCH 06/25] First step of supporting save filenames without trailing _ (#13722) get_save_image_path now properly supports filenames without trailing underscores. This will be the saving behavior when using a mix of save image nodes using the old and the new format. ComfyUI_00001_.png ComfyUI_00002.png ComfyUI_00003.png ComfyUI_00004_.png --- folder_paths.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/folder_paths.py b/folder_paths.py index 80f4b291a..039f72636 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -432,7 +432,9 @@ def get_save_image_path(filename_prefix: str, output_dir: str, image_width=0, im prefix_len = len(os.path.basename(filename_prefix)) prefix = filename[:prefix_len + 1] try: - digits = int(filename[prefix_len + 1:].split('_')[0]) + remainder = filename[prefix_len + 1:] + base_remainder = remainder.split('.')[0] + digits = int(base_remainder.split('_')[0]) except: digits = 0 return digits, prefix From 160b95f75c9cf60b04fbbf4ec0b8f35f474ffb2a Mon Sep 17 00:00:00 2001 From: iChrist Date: Wed, 6 May 2026 05:47:57 +0300 Subject: [PATCH 07/25] Update language options in nodes_ace.py (#12578) * Update language options in nodes_ace.py Modified it to include all 51 language options ace-step1.5 supports instead of the original 23 comfyui had. 
* re-arrange list by popularity changed order of the languages to be ordered by popularity en is default unknown is last * Update comfy_extras/nodes_ace.py --- comfy_extras/nodes_ace.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_ace.py b/comfy_extras/nodes_ace.py index 1602add84..affcf3b71 100644 --- a/comfy_extras/nodes_ace.py +++ b/comfy_extras/nodes_ace.py @@ -42,7 +42,7 @@ class TextEncodeAceStepAudio15(IO.ComfyNode): IO.Int.Input("bpm", default=120, min=10, max=300), IO.Float.Input("duration", default=120.0, min=0.0, max=2000.0, step=0.1), IO.Combo.Input("timesignature", options=['2', '3', '4', '6']), - IO.Combo.Input("language", options=["en", "ja", "zh", "es", "de", "fr", "pt", "ru", "it", "nl", "pl", "tr", "vi", "cs", "fa", "id", "ko", "uk", "hu", "ar", "sv", "ro", "el"]), + IO.Combo.Input("language", options=['ar', 'az', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'fa', 'fi', 'fr', 'he', 'hi', 'hr', 'ht', 'hu', 'id', 'is', 'it', 'ja', 'ko', 'la', 'lt', 'ms', 'ne', 'nl', 'no', 'pa', 'pl', 'pt', 'ro', 'ru', 'sa', 'sk', 'sr', 'sv', 'sw', 'ta', 'te', 'th', 'tl', 'tr', 'uk', 'ur', 'vi', 'yue', 'zh', 'unknown'], default='en'), IO.Combo.Input("keyscale", options=[f"{root} {quality}" for quality in ["major", "minor"] for root in ["C", "C#", "Db", "D", "D#", "Eb", "E", "F", "F#", "Gb", "G", "G#", "Ab", "A", "A#", "Bb", "B"]]), IO.Boolean.Input("generate_audio_codes", default=True, tooltip="Enable the LLM that generates audio codes. This can be slow but will increase the quality of the generated audio. Turn this off if you are giving the model an audio reference.", advanced=True), IO.Float.Input("cfg_scale", default=2.0, min=0.0, max=100.0, step=0.1, advanced=True), From 2b63add0ad975d5f2f0cdc3d4fd8e71ae6553cbf Mon Sep 17 00:00:00 2001 From: Luke Mino-Altherr Date: Tue, 5 May 2026 19:56:09 -0700 Subject: [PATCH 08/25] fix: return millisecond timestamps from get_file_info() (#12996) --- app/user_manager.py | 4 ++-- tests-unit/prompt_server_test/user_manager_test.py | 6 +++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/app/user_manager.py b/app/user_manager.py index e18afb71b..0517b3344 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -28,8 +28,8 @@ def get_file_info(path: str, relative_to: str) -> FileInfo: return { "path": os.path.relpath(path, relative_to).replace(os.sep, '/'), "size": os.path.getsize(path), - "modified": os.path.getmtime(path), - "created": os.path.getctime(path) + "modified": int(os.path.getmtime(path) * 1000), + "created": int(os.path.getctime(path) * 1000), } diff --git a/tests-unit/prompt_server_test/user_manager_test.py b/tests-unit/prompt_server_test/user_manager_test.py index b939d8e68..27118400f 100644 --- a/tests-unit/prompt_server_test/user_manager_test.py +++ b/tests-unit/prompt_server_test/user_manager_test.py @@ -69,7 +69,11 @@ async def test_listuserdata_full_info(aiohttp_client, app, tmp_path): assert len(result) == 1 assert result[0]["path"] == "file1.txt" assert "size" in result[0] - assert "modified" in result[0] + assert isinstance(result[0]["modified"], int) + assert isinstance(result[0]["created"], int) + # Verify millisecond magnitude (timestamps after year 2000 in ms are > 946684800000) + assert result[0]["modified"] > 946684800000 + assert result[0]["created"] > 946684800000 async def test_listuserdata_split_path(aiohttp_client, app, tmp_path): From 78b3096bf36ef32378a3d4299473b820211f1601 Mon Sep 17 00:00:00 2001 From: Talmaj Date: Wed, 6 May 2026 04:59:04 +0200 
Subject: [PATCH 09/25] Void model - pass 1 & 2 (CORE-38) (#13403) --- comfy/latent_formats.py | 18 + comfy/sd.py | 5 + comfy/supported_models.py | 23 + comfy/text_encoders/cogvideo.py | 42 ++ comfy_extras/nodes_void.py | 483 +++++++++++++++++ comfy_extras/void_noise_warp.py | 494 ++++++++++++++++++ folder_paths.py | 2 + .../optical_flow/put_optical_flow_models_here | 0 nodes.py | 5 +- 9 files changed, 1070 insertions(+), 2 deletions(-) create mode 100644 comfy_extras/nodes_void.py create mode 100644 comfy_extras/void_noise_warp.py create mode 100644 models/optical_flow/put_optical_flow_models_here diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index 60c0dfd7e..91bebed3d 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -793,9 +793,27 @@ class ZImagePixelSpace(ChromaRadiance): pass class CogVideoX(LatentFormat): + """Latent format for CogVideoX-2b (THUDM/CogVideoX-2b). + + scale_factor matches the vae/config.json scaling_factor for the 2b variant. + The 5b-class checkpoints (CogVideoX-5b, CogVideoX-1.5-5B, CogVideoX-Fun-V1.5-*) + use a different value; see CogVideoX1_5 below. + """ latent_channels = 16 latent_dimensions = 3 temporal_downscale_ratio = 4 def __init__(self): self.scale_factor = 1.15258426 + + +class CogVideoX1_5(CogVideoX): + """Latent format for 5b-class CogVideoX checkpoints. + + Covers THUDM/CogVideoX-5b, THUDM/CogVideoX-1.5-5B, and the CogVideoX-Fun + V1.5-5b family (including VOID inpainting). All of these have + scaling_factor=0.7 in their vae/config.json. Auto-selected in + supported_models.CogVideoX_T2V based on transformer hidden dim. + """ + def __init__(self): + self.scale_factor = 0.7 diff --git a/comfy/sd.py b/comfy/sd.py index 9fce0e7d0..749bdd710 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -66,6 +66,7 @@ import comfy.text_encoders.longcat_image import comfy.text_encoders.qwen35 import comfy.text_encoders.ernie import comfy.text_encoders.gemma4 +import comfy.text_encoders.cogvideo import comfy.model_patcher import comfy.lora @@ -1224,6 +1225,7 @@ class CLIPType(Enum): NEWBIE = 24 FLUX2 = 25 LONGCAT_IMAGE = 26 + COGVIDEOX = 27 @@ -1428,6 +1430,9 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip clip_target.clip = comfy.text_encoders.hidream.hidream_clip(**t5xxl_detect(clip_data), clip_l=False, clip_g=False, t5=True, llama=False, dtype_llama=None) clip_target.tokenizer = comfy.text_encoders.hidream.HiDreamTokenizer + elif clip_type == CLIPType.COGVIDEOX: + clip_target.clip = comfy.text_encoders.cogvideo.cogvideo_te(**t5xxl_detect(clip_data)) + clip_target.tokenizer = comfy.text_encoders.cogvideo.CogVideoXTokenizer else: #CLIPType.MOCHI clip_target.clip = comfy.text_encoders.genmo.mochi_te(**t5xxl_detect(clip_data)) clip_target.tokenizer = comfy.text_encoders.genmo.MochiT5Tokenizer diff --git a/comfy/supported_models.py b/comfy/supported_models.py index dff40461f..6a9613602 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -1872,6 +1872,14 @@ class CogVideoX_T2V(supported_models_base.BASE): vae_key_prefix = ["vae."] text_encoder_key_prefix = ["text_encoders."] + def __init__(self, unet_config): + # 2b-class (dim=1920, heads=30) uses scale_factor=1.15258426. + # 5b-class (dim=3072, heads=48) — incl. CogVideoX-5b, 1.5-5B, and + # Fun-V1.5 inpainting — uses scale_factor=0.7 per vae/config.json. 
+ if unet_config.get("num_attention_heads", 0) >= 48: + self.latent_format = latent_formats.CogVideoX1_5 + super().__init__(unet_config) + def get_model(self, state_dict, prefix="", device=None): # CogVideoX 1.5 (patch_size_t=2) has different training base dimensions for RoPE if self.unet_config.get("patch_size_t") is not None: @@ -1898,6 +1906,20 @@ class CogVideoX_I2V(CogVideoX_T2V): out = model_base.CogVideoX(self, image_to_video=True, device=device) return out +class CogVideoX_Inpaint(CogVideoX_T2V): + unet_config = { + "image_model": "cogvideox", + "in_channels": 48, + } + + def get_model(self, state_dict, prefix="", device=None): + if self.unet_config.get("patch_size_t") is not None: + self.unet_config.setdefault("sample_height", 96) + self.unet_config.setdefault("sample_width", 170) + self.unet_config.setdefault("sample_frames", 81) + out = model_base.CogVideoX(self, image_to_video=True, device=device) + return out + models = [ LotusD, @@ -1978,6 +2000,7 @@ models = [ ErnieImage, SAM3, SAM31, + CogVideoX_Inpaint, CogVideoX_I2V, CogVideoX_T2V, SVD_img2vid, diff --git a/comfy/text_encoders/cogvideo.py b/comfy/text_encoders/cogvideo.py index f1e8e3f5d..b97310709 100644 --- a/comfy/text_encoders/cogvideo.py +++ b/comfy/text_encoders/cogvideo.py @@ -1,6 +1,48 @@ import comfy.text_encoders.sd3_clip +from comfy import sd1_clip class CogVideoXT5Tokenizer(comfy.text_encoders.sd3_clip.T5XXLTokenizer): + """Inner T5 tokenizer for CogVideoX. + + CogVideoX was trained with T5 embeddings padded to 226 tokens (not 77 like SD3). + Used both directly by supported_models.CogVideoX_T2V.clip_target (paired with + the raw T5XXLModel) and by the CogVideoXTokenizer outer wrapper below. + """ def __init__(self, embedding_directory=None, tokenizer_data={}): super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, min_length=226) + + +class CogVideoXTokenizer(sd1_clip.SD1Tokenizer): + """Outer tokenizer wrapper for CLIPLoader (type="cogvideox").""" + def __init__(self, embedding_directory=None, tokenizer_data={}): + super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, + clip_name="t5xxl", tokenizer=CogVideoXT5Tokenizer) + + +class CogVideoXT5XXL(sd1_clip.SD1ClipModel): + """Outer T5XXL model wrapper for CLIPLoader (type="cogvideox"). + + Wraps the raw T5XXL model in the SD1ClipModel interface so that CLIP.__init__ + (which reads self.dtypes) works correctly. The inner model is the standard + sd3_clip.T5XXLModel (no attention_mask change needed for CogVideoX). + """ + def __init__(self, device="cpu", dtype=None, model_options={}): + super().__init__(device=device, dtype=dtype, name="t5xxl", + clip_model=comfy.text_encoders.sd3_clip.T5XXLModel, + model_options=model_options) + + +def cogvideo_te(dtype_t5=None, t5_quantization_metadata=None): + """Factory that returns a CogVideoXT5XXL class configured with the detected + T5 dtype and optional quantization metadata, for use in load_text_encoder_state_dicts. 
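+
+    Call-site sketch (mirrors the CLIPType.COGVIDEOX branch this patch adds to
+    load_text_encoder_state_dicts in sd.py):
+
+        clip_target.clip = comfy.text_encoders.cogvideo.cogvideo_te(**t5xxl_detect(clip_data))
+        clip_target.tokenizer = comfy.text_encoders.cogvideo.CogVideoXTokenizer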
+ """ + class CogVideoXTEModel_(CogVideoXT5XXL): + def __init__(self, device="cpu", dtype=None, model_options={}): + if t5_quantization_metadata is not None: + model_options = model_options.copy() + model_options["t5xxl_quantization_metadata"] = t5_quantization_metadata + if dtype_t5 is not None: + dtype = dtype_t5 + super().__init__(device=device, dtype=dtype, model_options=model_options) + return CogVideoXTEModel_ diff --git a/comfy_extras/nodes_void.py b/comfy_extras/nodes_void.py new file mode 100644 index 000000000..e7a8f3757 --- /dev/null +++ b/comfy_extras/nodes_void.py @@ -0,0 +1,483 @@ +import logging + +import torch + +import comfy +import comfy.model_management +import comfy.model_patcher +import comfy.samplers +import comfy.utils +import folder_paths +import node_helpers +import nodes +from comfy.utils import model_trange as trange +from comfy_api.latest import ComfyExtension, io +from torchvision.models.optical_flow import raft_large +from typing_extensions import override + + +from comfy_extras.void_noise_warp import RaftOpticalFlow, get_noise_from_video + +OpticalFlow = io.Custom("OPTICAL_FLOW") + +TEMPORAL_COMPRESSION = 4 +PATCH_SIZE_T = 2 + + +def _valid_void_length(length: int) -> int: + """Round ``length`` down to a value that produces an even latent_t. + + VOID / CogVideoX-Fun-V1.5 uses patch_size_t=2, so the VAE-encoded latent + must have an even temporal dimension. If latent_t is odd, the transformer + pad_to_patch_size circular-wraps an extra latent frame onto the end; after + the post-transformer crop the last real latent frame has been influenced + by the wrapped phantom frame, producing visible jitter and "disappearing" + subjects near the end of the decoded video. Rounding down fixes this. + """ + latent_t = ((length - 1) // TEMPORAL_COMPRESSION) + 1 + if latent_t % PATCH_SIZE_T == 0: + return length + # Round latent_t down to the nearest multiple of PATCH_SIZE_T, then invert + # the ((length - 1) // TEMPORAL_COMPRESSION) + 1 formula. Floor at 1 frame + # so we never return a non-positive length. + target_latent_t = max(PATCH_SIZE_T, (latent_t // PATCH_SIZE_T) * PATCH_SIZE_T) + return (target_latent_t - 1) * TEMPORAL_COMPRESSION + 1 + + +class OpticalFlowLoader(io.ComfyNode): + """Load an optical flow model from ``models/optical_flow/``. + + Only torchvision's RAFT-large format is recognized today (the model used + by VOIDWarpedNoise). The checkpoint must be placed under + ``models/optical_flow/`` — ComfyUI never downloads optical-flow weights + at runtime. + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="OpticalFlowLoader", + display_name="Load Optical Flow Model", + category="loaders", + inputs=[ + io.Combo.Input( + "model_name", + options=folder_paths.get_filename_list("optical_flow"), + tooltip=( + "Optical flow model to load. Files must be placed in the " + "'optical_flow' folder. Today only torchvision's " + "raft_large.pth is supported." 
+ ), + ), + ], + outputs=[ + OpticalFlow.Output(), + ], + ) + + @classmethod + def execute(cls, model_name) -> io.NodeOutput: + + model_path = folder_paths.get_full_path_or_raise("optical_flow", model_name) + sd = comfy.utils.load_torch_file(model_path, safe_load=True) + + has_raft_keys = ( + any(k.startswith("feature_encoder.") for k in sd) + and any(k.startswith("context_encoder.") for k in sd) + and any(k.startswith("update_block.") for k in sd) + ) + if not has_raft_keys: + raise ValueError( + "Unrecognized optical flow model format: expected a torchvision " + "RAFT-large state dict with 'feature_encoder.', 'context_encoder.' " + "and 'update_block.' prefixes." + ) + + model = raft_large(weights=None, progress=False) + model.load_state_dict(sd) + model.eval().to(torch.float32) + + patcher = comfy.model_patcher.ModelPatcher( + model, + load_device=comfy.model_management.get_torch_device(), + offload_device=comfy.model_management.unet_offload_device(), + ) + return io.NodeOutput(patcher) + + +class VOIDQuadmaskPreprocess(io.ComfyNode): + """Preprocess a quadmask video for VOID inpainting. + + Quantizes mask values to four semantic levels, inverts, and normalizes: + 0 -> primary object to remove + 63 -> overlap of primary + affected + 127 -> affected region (interactions) + 255 -> background (keep) + + After inversion and normalization, the output mask has values in [0, 1] + with four discrete levels: 1.0 (remove), ~0.75, ~0.50, 0.0 (keep). + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDQuadmaskPreprocess", + category="mask/video", + inputs=[ + io.Mask.Input("mask"), + io.Int.Input("dilate_width", default=0, min=0, max=50, step=1, + tooltip="Dilation radius for the primary mask region (0 = no dilation)"), + ], + outputs=[ + io.Mask.Output(display_name="quadmask"), + ], + ) + + @classmethod + def execute(cls, mask, dilate_width=0) -> io.NodeOutput: + m = mask.clone() + + if m.max() <= 1.0: + m = m * 255.0 + + if dilate_width > 0 and m.ndim >= 3: + binary = (m < 128).float() + kernel_size = dilate_width * 2 + 1 + if binary.ndim == 3: + binary = binary.unsqueeze(1) + dilated = torch.nn.functional.max_pool2d( + binary, kernel_size=kernel_size, stride=1, padding=dilate_width + ) + if dilated.ndim == 4: + dilated = dilated.squeeze(1) + m = torch.where(dilated > 0.5, torch.zeros_like(m), m) + + m = torch.where(m <= 31, torch.zeros_like(m), m) + m = torch.where((m > 31) & (m <= 95), torch.full_like(m, 63), m) + m = torch.where((m > 95) & (m <= 191), torch.full_like(m, 127), m) + m = torch.where(m > 191, torch.full_like(m, 255), m) + + m = (255.0 - m) / 255.0 + + return io.NodeOutput(m) + + +class VOIDInpaintConditioning(io.ComfyNode): + """Build VOID inpainting conditioning for CogVideoX. + + Encodes the processed quadmask and masked source video through the VAE, + producing a 32-channel concat conditioning (16ch mask + 16ch masked video) + that gets concatenated with the 16ch noise latent by the model. 
+ """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDInpaintConditioning", + category="conditioning/video_models", + inputs=[ + io.Conditioning.Input("positive"), + io.Conditioning.Input("negative"), + io.Vae.Input("vae"), + io.Image.Input("video", tooltip="Source video frames [T, H, W, 3]"), + io.Mask.Input("quadmask", tooltip="Preprocessed quadmask from VOIDQuadmaskPreprocess [T, H, W]"), + io.Int.Input("width", default=672, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=384, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("length", default=45, min=1, max=nodes.MAX_RESOLUTION, step=1, + tooltip="Number of pixel frames to process. For CogVideoX-Fun-V1.5 " + "(patch_size_t=2), latent_t must be even — lengths that " + "produce odd latent_t are rounded down (e.g. 49 → 45)."), + io.Int.Input("batch_size", default=1, min=1, max=64), + ], + outputs=[ + io.Conditioning.Output(display_name="positive"), + io.Conditioning.Output(display_name="negative"), + io.Latent.Output(display_name="latent"), + ], + ) + + @classmethod + def execute(cls, positive, negative, vae, video, quadmask, + width, height, length, batch_size) -> io.NodeOutput: + + adjusted_length = _valid_void_length(length) + if adjusted_length != length: + logging.warning( + "VOIDInpaintConditioning: rounding length %d down to %d so that " + "latent_t is even (required by CogVideoX-Fun-V1.5 patch_size_t=2). " + "Using odd latent_t causes the last frame to be corrupted by " + "circular padding.", length, adjusted_length, + ) + length = adjusted_length + + latent_t = ((length - 1) // TEMPORAL_COMPRESSION) + 1 + latent_h = height // 8 + latent_w = width // 8 + + vid = video[:length] + vid = comfy.utils.common_upscale( + vid.movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + + qm = quadmask[:length] + if qm.ndim == 3: + qm = qm.unsqueeze(-1) + qm = comfy.utils.common_upscale( + qm.movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + if qm.ndim == 4 and qm.shape[-1] == 1: + qm = qm.squeeze(-1) + + mask_condition = qm + if mask_condition.ndim == 3: + mask_condition_3ch = mask_condition.unsqueeze(-1).expand(-1, -1, -1, 3) + else: + mask_condition_3ch = mask_condition + + inverted_mask_3ch = 1.0 - mask_condition_3ch + masked_video = vid[:, :, :, :3] * (1.0 - mask_condition_3ch) + + mask_latents = vae.encode(inverted_mask_3ch) + masked_video_latents = vae.encode(masked_video) + + def _match_temporal(lat, target_t): + if lat.shape[2] > target_t: + return lat[:, :, :target_t] + elif lat.shape[2] < target_t: + pad = target_t - lat.shape[2] + return torch.cat([lat, lat[:, :, -1:].repeat(1, 1, pad, 1, 1)], dim=2) + return lat + + mask_latents = _match_temporal(mask_latents, latent_t) + masked_video_latents = _match_temporal(masked_video_latents, latent_t) + + inpaint_latents = torch.cat([mask_latents, masked_video_latents], dim=1) + + # No explicit scaling needed here: the model's CogVideoX.concat_cond() + # applies process_latent_in (×latent_format.scale_factor) to each 16-ch + # block of the stored conditioning. For 5b-class checkpoints (incl. the + # VOID/CogVideoX-Fun-V1.5 inpainting model) that scale_factor is auto- + # selected as 0.7 in supported_models.CogVideoX_T2V, which matches the + # diffusers vae/config.json scaling_factor VOID was trained with. 
+ + positive = node_helpers.conditioning_set_values( + positive, {"concat_latent_image": inpaint_latents} + ) + negative = node_helpers.conditioning_set_values( + negative, {"concat_latent_image": inpaint_latents} + ) + + noise_latent = torch.zeros( + [batch_size, 16, latent_t, latent_h, latent_w], + device=comfy.model_management.intermediate_device() + ) + + return io.NodeOutput(positive, negative, {"samples": noise_latent}) + + +class VOIDWarpedNoise(io.ComfyNode): + """Generate optical-flow warped noise for VOID Pass 2 refinement. + + Takes the Pass 1 output video and produces temporally-correlated noise + by warping Gaussian noise along optical flow vectors. This noise is used + as the initial latent for Pass 2, resulting in better temporal consistency. + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDWarpedNoise", + category="latent/video", + inputs=[ + OpticalFlow.Input( + "optical_flow", + tooltip="Optical flow model from OpticalFlowLoader (RAFT-large).", + ), + io.Image.Input("video", tooltip="Pass 1 output video frames [T, H, W, 3]"), + io.Int.Input("width", default=672, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("height", default=384, min=16, max=nodes.MAX_RESOLUTION, step=8), + io.Int.Input("length", default=45, min=1, max=nodes.MAX_RESOLUTION, step=1, + tooltip="Number of pixel frames. Rounded down to make latent_t " + "even (patch_size_t=2 requirement), e.g. 49 → 45."), + io.Int.Input("batch_size", default=1, min=1, max=64), + ], + outputs=[ + io.Latent.Output(display_name="warped_noise"), + ], + ) + + @classmethod + def execute(cls, optical_flow, video, width, height, length, batch_size) -> io.NodeOutput: + + adjusted_length = _valid_void_length(length) + if adjusted_length != length: + logging.warning( + "VOIDWarpedNoise: rounding length %d down to %d so that " + "latent_t is even (required by CogVideoX-Fun-V1.5 patch_size_t=2).", + length, adjusted_length, + ) + length = adjusted_length + + latent_t = ((length - 1) // TEMPORAL_COMPRESSION) + 1 + latent_h = height // 8 + latent_w = width // 8 + + # RAFT + noise warp is real compute, not an "intermediate" buffer, so + # we want the actual torch device (CUDA/MPS). The final latent is + # moved back to intermediate_device() before returning to match the + # rest of the ComfyUI pipeline. 
+ device = comfy.model_management.get_torch_device() + + comfy.model_management.load_model_gpu(optical_flow) + raft = RaftOpticalFlow(optical_flow.model, device=device) + + vid = video[:length].to(device) + vid = comfy.utils.common_upscale( + vid.movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + vid_uint8 = (vid.clamp(0, 1) * 255).to(torch.uint8) + + FRAME = 2**-1 + FLOW = 2**3 + LATENT_SCALE = 8 + + warped = get_noise_from_video( + vid_uint8, + raft, + noise_channels=16, + resize_frames=FRAME, + resize_flow=FLOW, + downscale_factor=round(FRAME * FLOW) * LATENT_SCALE, + device=device, + ) + + if warped.shape[0] != latent_t: + indices = torch.linspace(0, warped.shape[0] - 1, latent_t, + device=device).long() + warped = warped[indices] + + if warped.shape[1] != latent_h or warped.shape[2] != latent_w: + # (T, H, W, C) → (T, C, H, W) → bilinear resize → back + warped = warped.permute(0, 3, 1, 2) + warped = torch.nn.functional.interpolate( + warped, size=(latent_h, latent_w), + mode="bilinear", align_corners=False, + ) + warped = warped.permute(0, 2, 3, 1) + + # (T, H, W, C) → (B, C, T, H, W) + warped_tensor = warped.permute(3, 0, 1, 2).unsqueeze(0) + if batch_size > 1: + warped_tensor = warped_tensor.repeat(batch_size, 1, 1, 1, 1) + + warped_tensor = warped_tensor.to(comfy.model_management.intermediate_device()) + return io.NodeOutput({"samples": warped_tensor}) + + +class Noise_FromLatent: + """Wraps a pre-computed LATENT tensor as a NOISE source.""" + def __init__(self, latent_dict): + self.seed = 0 + self._samples = latent_dict["samples"] + + def generate_noise(self, input_latent): + return self._samples.clone().cpu() + + +class VOIDWarpedNoiseSource(io.ComfyNode): + """Convert a LATENT (e.g. from VOIDWarpedNoise) into a NOISE source + for use with SamplerCustomAdvanced.""" + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDWarpedNoiseSource", + category="sampling/custom_sampling/noise", + inputs=[ + io.Latent.Input("warped_noise", + tooltip="Warped noise latent from VOIDWarpedNoise"), + ], + outputs=[io.Noise.Output()], + ) + + @classmethod + def execute(cls, warped_noise) -> io.NodeOutput: + return io.NodeOutput(Noise_FromLatent(warped_noise)) + + +class VOID_DDIM(comfy.samplers.Sampler): + """DDIM sampler for VOID inpainting models. + + VOID was trained with the diffusers CogVideoXDDIMScheduler which operates in + alpha-space (input std ≈ 1). The standard KSampler applies noise_scaling that + multiplies by sqrt(1+sigma^2) ≈ 4500x, which is incompatible with VOID's + training. This sampler skips noise_scaling and implements the DDIM update rule + directly using sigma-to-alpha conversion. 
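+
+    Update rule implemented in sample() below (deterministic DDIM, eta=0,
+    written in the sigma-to-alpha form so x keeps unit std):
+
+        alpha_t    = 1 / (1 + sigma**2)
+        alpha_prev = 1 / (1 + sigma_next**2)
+        pred_eps   = (x - alpha_t**0.5 * denoised) / (1 - alpha_t)**0.5
+        x          = alpha_prev**0.5 * denoised + (1 - alpha_prev)**0.5 * pred_eps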
+ """ + + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + x = noise.to(torch.float32) + model_options = extra_args.get("model_options", {}) + seed = extra_args.get("seed", None) + s_in = x.new_ones([x.shape[0]]) + + for i in trange(len(sigmas) - 1, disable=disable_pbar): + sigma = sigmas[i] + sigma_next = sigmas[i + 1] + + denoised = model_wrap(x, sigma * s_in, model_options=model_options, seed=seed) + + if callback is not None: + callback(i, denoised, x, len(sigmas) - 1) + + if sigma_next == 0: + x = denoised + else: + alpha_t = 1.0 / (1.0 + sigma ** 2) + alpha_prev = 1.0 / (1.0 + sigma_next ** 2) + + pred_eps = (x - (alpha_t ** 0.5) * denoised) / (1.0 - alpha_t) ** 0.5 + x = (alpha_prev ** 0.5) * denoised + (1.0 - alpha_prev) ** 0.5 * pred_eps + + return x + + +class VOIDSampler(io.ComfyNode): + """VOID DDIM sampler for use with SamplerCustom / SamplerCustomAdvanced. + + Required for VOID inpainting models. Implements the same DDIM loop that VOID + was trained with (diffusers CogVideoXDDIMScheduler), without the noise_scaling + that the standard KSampler applies. Use with RandomNoise or VOIDWarpedNoiseSource. + """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="VOIDSampler", + category="sampling/custom_sampling/samplers", + inputs=[], + outputs=[io.Sampler.Output()], + ) + + @classmethod + def execute(cls) -> io.NodeOutput: + return io.NodeOutput(VOID_DDIM()) + + get_sampler = execute + + +class VOIDExtension(ComfyExtension): + @override + async def get_node_list(self) -> list[type[io.ComfyNode]]: + return [ + OpticalFlowLoader, + VOIDQuadmaskPreprocess, + VOIDInpaintConditioning, + VOIDWarpedNoise, + VOIDWarpedNoiseSource, + VOIDSampler, + ] + + +async def comfy_entrypoint() -> VOIDExtension: + return VOIDExtension() diff --git a/comfy_extras/void_noise_warp.py b/comfy_extras/void_noise_warp.py new file mode 100644 index 000000000..fcc9a5f8b --- /dev/null +++ b/comfy_extras/void_noise_warp.py @@ -0,0 +1,494 @@ +""" +Optical-flow-warped noise for VOID Pass 2 refinement. + +Adapted from RyannDaGreat/CommonSource (MIT License, Ryan Burgert): + https://github.com/RyannDaGreat/CommonSource + - noise_warp.py (NoiseWarper / warp_xyωc / regaussianize / get_noise_from_video) + - raft.py (RaftOpticalFlow) + +Only the code paths that ``comfy_extras/nodes_void.py::VOIDWarpedNoise`` actually +uses (torch THWC uint8 input, no background removal, no visualization, no disk +I/O, default warp/noise params) have been inlined. External ``rp`` utilities +have been replaced with equivalents from torch.nn.functional / einops. The +RAFT optical-flow model itself is loaded offline via ``OpticalFlowLoader`` in +``nodes_void.py`` and passed into ``get_noise_from_video`` by the caller; this +module never downloads weights at runtime. +""" + +import logging +from typing import Optional + +import torch +import torch.nn.functional as F +from einops import rearrange + +import comfy.model_management + + +# --------------------------------------------------------------------------- +# Low-level torch image helpers (drop-in replacements for rp.torch_* primitives) +# --------------------------------------------------------------------------- + +def _torch_resize_chw(image, size, interp, copy=True): + """Resize a CHW tensor. + + ``size`` is either a scalar factor or a (h, w) tuple. ``interp`` is one + of ``"bilinear"``, ``"nearest"``, ``"area"``. 
When ``copy`` is False and + the requested size matches the input, returns the input tensor as is + (faster but callers must not mutate the result). + """ + if image.ndim != 3: + raise ValueError( + f"_torch_resize_chw expects a 3D CHW tensor, got shape {tuple(image.shape)}" + ) + _, in_h, in_w = image.shape + if isinstance(size, (int, float)) and not isinstance(size, bool): + new_h = max(1, int(in_h * size)) + new_w = max(1, int(in_w * size)) + else: + new_h, new_w = size + + if (new_h, new_w) == (in_h, in_w): + return image.clone() if copy else image + + kwargs = {} + if interp in ("bilinear", "bicubic"): + kwargs["align_corners"] = False + out = F.interpolate(image[None], size=(new_h, new_w), mode=interp, **kwargs)[0] + return out + + +def _torch_remap_relative(image, dx, dy, interp="bilinear"): + """Relative remap of a CHW image via ``F.grid_sample``. + + Equivalent to ``rp.torch_remap_image(image, dx, dy, relative=True, interp=interp)`` + for ``interp`` in {"bilinear", "nearest"}. Out-of-bounds samples are 0. + """ + if image.ndim != 3: + raise ValueError( + f"_torch_remap_relative expects a 3D CHW tensor, got shape {tuple(image.shape)}" + ) + if dx.shape != dy.shape: + raise ValueError( + f"_torch_remap_relative: dx and dy must match, got {tuple(dx.shape)} vs {tuple(dy.shape)}" + ) + _, h, w = image.shape + + x_abs = dx + torch.arange(w, device=dx.device, dtype=dx.dtype) + y_abs = dy + torch.arange(h, device=dy.device, dtype=dy.dtype)[:, None] + + x_norm = (x_abs / (w - 1)) * 2 - 1 + y_norm = (y_abs / (h - 1)) * 2 - 1 + + grid = torch.stack([x_norm, y_norm], dim=-1)[None].to(image.dtype) + out = F.grid_sample( + image[None], grid, mode=interp, align_corners=True, padding_mode="zeros" + )[0] + return out + + +def _torch_scatter_add_relative(image, dx, dy): + """Scatter-add a CHW image using relative floor-rounded (dx, dy) offsets. + + Equivalent to ``rp.torch_scatter_add_image(image, dx, dy, relative=True, + interp='floor')``. Out-of-bounds targets are dropped. + """ + if image.ndim != 3: + raise ValueError( + f"_torch_scatter_add_relative expects a 3D CHW tensor, got shape {tuple(image.shape)}" + ) + in_c, in_h, in_w = image.shape + if dx.shape != (in_h, in_w) or dy.shape != (in_h, in_w): + raise ValueError( + f"_torch_scatter_add_relative: dx/dy must be ({in_h}, {in_w}), " + f"got dx={tuple(dx.shape)} dy={tuple(dy.shape)}" + ) + + x = dx.long() + torch.arange(in_w, device=dx.device, dtype=torch.long) + y = dy.long() + torch.arange(in_h, device=dy.device, dtype=torch.long)[:, None] + + valid = ((y >= 0) & (y < in_h) & (x >= 0) & (x < in_w)).reshape(-1) + indices = (y * in_w + x).reshape(-1)[valid] + + flat_image = rearrange(image, "c h w -> (h w) c")[valid] + out = torch.zeros((in_h * in_w, in_c), dtype=image.dtype, device=image.device) + out.index_add_(0, indices, flat_image) + return rearrange(out, "(h w) c -> c h w", h=in_h, w=in_w) + + +# --------------------------------------------------------------------------- +# Noise warping primitives (ported from noise_warp.py) +# --------------------------------------------------------------------------- + +def unique_pixels(image): + """Find unique pixel values in a CHW tensor. + + Returns ``(unique_colors [U, C], counts [U], index_matrix [H, W])`` where + ``index_matrix[i, j]`` is the index of the unique color at that pixel. 
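+ For example, an image with exactly two distinct colors yields ``U == 2`` and
+ ``counts.sum() == h * w``.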
+ """ + _, h, w = image.shape + flat = rearrange(image, "c h w -> (h w) c") + unique_colors, inverse_indices, counts = torch.unique( + flat, dim=0, return_inverse=True, return_counts=True, sorted=False, + ) + index_matrix = rearrange(inverse_indices, "(h w) -> h w", h=h, w=w) + return unique_colors, counts, index_matrix + + +def sum_indexed_values(image, index_matrix): + """For each unique index, sum the CHW image values at its pixels.""" + _, h, w = image.shape + u = int(index_matrix.max().item()) + 1 + flat = rearrange(image, "c h w -> (h w) c") + out = torch.zeros((u, flat.shape[1]), dtype=flat.dtype, device=flat.device) + out.index_add_(0, index_matrix.view(-1), flat) + return out + + +def indexed_to_image(index_matrix, unique_colors): + """Build a CHW image from an index matrix and a (U, C) color table.""" + h, w = index_matrix.shape + flat = unique_colors[index_matrix.view(-1)] + return rearrange(flat, "(h w) c -> c h w", h=h, w=w) + + +def regaussianize(noise): + """Variance-preserving re-sampling of a CHW noise tensor. + + Wherever the noise contains groups of identical pixel values (e.g. after + a nearest-neighbor warp that duplicated source pixels), adds zero-mean + foreign noise within each group and scales by ``1/sqrt(count)`` so the + output is unit-variance gaussian again. + """ + _, hs, ws = noise.shape + _, counts, index_matrix = unique_pixels(noise[:1]) + + foreign_noise = torch.randn_like(noise) + summed = sum_indexed_values(foreign_noise, index_matrix) + meaned = indexed_to_image(index_matrix, summed / rearrange(counts, "u -> u 1")) + zeroed_foreign = foreign_noise - meaned + + counts_image = indexed_to_image(index_matrix, rearrange(counts, "u -> u 1")) + + output = noise / counts_image ** 0.5 + zeroed_foreign + return output, counts_image + + +def xy_meshgrid_like_image(image): + """Return a (2, H, W) tensor of (x, y) pixel coordinates matching ``image``.""" + _, h, w = image.shape + y, x = torch.meshgrid( + torch.arange(h, device=image.device, dtype=image.dtype), + torch.arange(w, device=image.device, dtype=image.dtype), + indexing="ij", + ) + return torch.stack([x, y]) + + +def noise_to_state(noise): + """Pack a (C, H, W) noise tensor into a state tensor (3+C, H, W) = [dx, dy, ω, noise].""" + zeros = torch.zeros_like(noise[:1]) + ones = torch.ones_like(noise[:1]) + return torch.cat([zeros, zeros, ones, noise]) + + +def state_to_noise(state): + """Unpack the noise channels from a state tensor.""" + return state[3:] + + +def warp_state(state, flow): + """Warp a noise-warper state tensor along the given optical flow. + + ``state`` has shape ``(3+c, h, w)`` (= dx, dy, ω, c noise channels). + ``flow`` has shape ``(2, h, w)`` (= dx, dy). 
+ """ + if flow.device != state.device: + raise ValueError( + f"warp_state: flow and state must be on the same device, " + f"got flow={flow.device} state={state.device}" + ) + if state.ndim != 3: + raise ValueError( + f"warp_state: state must be 3D (3+C, H, W), got shape {tuple(state.shape)}" + ) + xyoc, h, w = state.shape + if flow.shape != (2, h, w): + raise ValueError( + f"warp_state: flow must have shape (2, {h}, {w}), got {tuple(flow.shape)}" + ) + device = state.device + + x_ch, y_ch = 0, 1 + xy = 2 # state[:xy] = [dx, dy] + xyw = 3 # state[:xyw] = [dx, dy, ω] + w_ch = 2 # state[w_ch] = ω + c = xyoc - xyw + oc = xyoc - xy + if c <= 0: + raise ValueError( + f"warp_state: state has no noise channels (expected 3+C with C>0, got {xyoc} channels)" + ) + if not (state[w_ch] > 0).all(): + raise ValueError("warp_state: all weights in state[2] must be > 0") + + grid = xy_meshgrid_like_image(state) + + init = torch.empty_like(state) + init[:xy] = 0 + init[w_ch] = 1 + init[-c:] = 0 + + # --- Expansion branch: nearest-neighbor remap with negated flow --- + pre_expand = torch.empty_like(state) + pre_expand[:xy] = _torch_remap_relative(state[:xy], -flow[0], -flow[1], "nearest") + pre_expand[-oc:] = _torch_remap_relative(state[-oc:], -flow[0], -flow[1], "nearest") + pre_expand[w_ch][pre_expand[w_ch] == 0] = 1 + + # --- Shrink branch: scatter-add state into new positions --- + pre_shrink = state.clone() + pre_shrink[:xy] += flow + + pos = (grid + pre_shrink[:xy]).round() + in_bounds = (pos[x_ch] >= 0) & (pos[x_ch] < w) & (pos[y_ch] >= 0) & (pos[y_ch] < h) + pre_shrink = torch.where(~in_bounds[None], init, pre_shrink) + + scat_xy = pre_shrink[:xy].round() + pre_shrink[:xy] -= scat_xy + pre_shrink[:xy] = 0 # xy_mode='none' in upstream + + def scat(tensor): + return _torch_scatter_add_relative(tensor, scat_xy[0], scat_xy[1]) + + # rp.torch_scatter_add_image on a bool tensor errors on modern torch; + # scatter-sum a float ones tensor and threshold to get the mask instead. + shrink_mask = scat(torch.ones(1, h, w, dtype=state.dtype, device=device)) > 0 + + # Drop expansion samples at positions that will be filled by shrink. + pre_expand = torch.where(shrink_mask, init, pre_expand) + + # Regaussianize both branches together so duplicated-source groups are + # counted globally, then split back apart. + concat = torch.cat([pre_shrink, pre_expand], dim=2) # along width + concat[-c:], counts_image = regaussianize(concat[-c:]) + concat[w_ch] = concat[w_ch] / counts_image[0] + concat[w_ch] = concat[w_ch].nan_to_num() + pre_shrink, expand = torch.chunk(concat, chunks=2, dim=2) + + shrink = torch.empty_like(pre_shrink) + shrink[w_ch] = scat(pre_shrink[w_ch][None])[0] + shrink[:xy] = scat(pre_shrink[:xy] * pre_shrink[w_ch][None]) / shrink[w_ch][None] + shrink[-c:] = scat(pre_shrink[-c:] * pre_shrink[w_ch][None]) / scat( + pre_shrink[w_ch][None] ** 2 + ).sqrt() + + output = torch.where(shrink_mask, shrink, expand) + output[w_ch] = output[w_ch] / output[w_ch].mean() + output[w_ch] += 1e-5 + output[w_ch] **= 0.9999 + return output + + +class NoiseWarper: + """Maintain a warpable noise state and emit gaussian noise per frame. + + Simplified from RyannDaGreat/CommonSource/noise_warp.py::NoiseWarper: + ``scale_factor``, ``post_noise_alpha``, ``progressive_noise_alpha``, and + ``warp_kwargs`` are all dropped since VOIDWarpedNoise always uses defaults. 
+ """ + + def __init__(self, c, h, w, device, dtype=torch.float32): + if c <= 0 or h <= 0 or w <= 0: + raise ValueError( + f"NoiseWarper: c/h/w must all be positive, got c={c} h={h} w={w}" + ) + self.c = c + self.h = h + self.w = w + self.device = device + self.dtype = dtype + + noise = torch.randn(c, h, w, dtype=dtype, device=device) + self._state = noise_to_state(noise) + + @property + def noise(self): + # With scale_factor=1 the "downsample to respect weights" step is a + # size-preserving no-op; the weight-variance correction math still + # runs to stay faithful to upstream. + n = state_to_noise(self._state) + weights = self._state[2:3] + return n * weights / (weights ** 2).sqrt() + + def __call__(self, dx, dy): + if dx.shape != dy.shape: + raise ValueError( + f"NoiseWarper: dx and dy must match, got {tuple(dx.shape)} vs {tuple(dy.shape)}" + ) + flow = torch.stack([dx, dy]).to(self.device, self.dtype) + _, oflowh, ofloww = flow.shape + + flow = _torch_resize_chw(flow, (self.h, self.w), "bilinear", copy=True) + flowh, floww = flow.shape[-2:] + + # Upstream scales flow[0] by flowh/oflowh and flow[1] by floww/ofloww + # (channel-order appears swapped but harmless when H and W are scaled + # by the same factor, which is always the case for our callers). + flow[0] *= flowh / oflowh + flow[1] *= floww / ofloww + + self._state = warp_state(self._state, flow) + return self + + +# --------------------------------------------------------------------------- +# RAFT optical flow wrapper (ported from raft.py) +# --------------------------------------------------------------------------- + +class RaftOpticalFlow: + """RAFT-large wrapper around a pre-loaded torchvision model. + + ``model`` must be the ``torchvision.models.optical_flow.raft_large`` module + with its weights already populated; this class is load-agnostic so the + caller owns downloading/offload concerns (see ``OpticalFlowLoader`` in + ``nodes_void.py``). ``__call__`` returns a ``(2, H, W)`` flow. 
+ """ + + def __init__(self, model, device=None): + if device is None: + device = comfy.model_management.get_torch_device() + device = torch.device(device) if not isinstance(device, torch.device) else device + + model = model.to(device) + model.eval() + self.device = device + self.model = model + + def _preprocess(self, image_chw): + image = image_chw.to(self.device, torch.float32) + _, h, w = image.shape + new_h = (h // 8) * 8 + new_w = (w // 8) * 8 + image = _torch_resize_chw(image, (new_h, new_w), "bilinear", copy=False) + image = image * 2 - 1 + return image[None] + + def __call__(self, from_image, to_image): + """``from_image``, ``to_image``: CHW float tensors in [0, 1].""" + if from_image.shape != to_image.shape: + raise ValueError( + f"RaftOpticalFlow: from_image and to_image must match, " + f"got {tuple(from_image.shape)} vs {tuple(to_image.shape)}" + ) + _, h, w = from_image.shape + with torch.no_grad(): + img1 = self._preprocess(from_image) + img2 = self._preprocess(to_image) + list_of_flows = self.model(img1, img2) + flow = list_of_flows[-1][0] # (2, new_h, new_w) + if flow.shape[-2:] != (h, w): + flow = _torch_resize_chw(flow, (h, w), "bilinear", copy=False) + return flow + + +# --------------------------------------------------------------------------- +# Narrow entry point used by VOIDWarpedNoise +# --------------------------------------------------------------------------- + +def get_noise_from_video( + video_frames: torch.Tensor, + raft: RaftOpticalFlow, + *, + noise_channels: int = 16, + resize_frames: float = 0.5, + resize_flow: int = 8, + downscale_factor: int = 32, + device: Optional[torch.device] = None, +) -> torch.Tensor: + """Produce optical-flow-warped gaussian noise from a video. + + Args: + video_frames: ``(T, H, W, 3)`` uint8 torch tensor. + raft: Pre-loaded RAFT optical-flow wrapper (see ``RaftOpticalFlow``). + noise_channels: Channels in the output noise. + resize_frames: Pre-RAFT frame scale factor. + resize_flow: Post-flow up-scale factor applied to the optical flow; + the internal noise state is allocated at + ``(resize_flow * resize_frames * H, resize_flow * resize_frames * W)``. + downscale_factor: Area-pool factor applied to the noise before return; + should evenly divide the internal noise resolution. + device: Target device. Defaults to ``comfy.model_management.get_torch_device()``. + + Returns: + ``(T, H', W', noise_channels)`` float32 noise tensor on ``device``. + """ + if not isinstance(resize_flow, int) or resize_flow < 1: + raise ValueError( + f"get_noise_from_video: resize_flow must be a positive int, got {resize_flow!r}" + ) + if video_frames.ndim != 4 or video_frames.shape[-1] != 3: + raise ValueError( + "get_noise_from_video: video_frames must have shape (T, H, W, 3), " + f"got {tuple(video_frames.shape)}" + ) + if video_frames.dtype != torch.uint8: + raise TypeError( + "get_noise_from_video: video_frames must be uint8 in [0, 255], " + f"got dtype {video_frames.dtype}" + ) + + if device is None: + device = comfy.model_management.get_torch_device() + device = torch.device(device) if not isinstance(device, torch.device) else device + + if device.type == "cpu": + logging.warning( + "VOIDWarpedNoise: running get_noise_from_video on CPU; this will be " + "slow (minutes for ~45 frames). Use CUDA for interactive use." 
+ ) + + T = video_frames.shape[0] + frames = video_frames.to(device).permute(0, 3, 1, 2).to(torch.float32) / 255.0 + if resize_frames != 1.0: + new_h = max(1, int(frames.shape[2] * resize_frames)) + new_w = max(1, int(frames.shape[3] * resize_frames)) + frames = F.interpolate(frames, size=(new_h, new_w), mode="area") + + _, _, H, W = frames.shape + internal_h = resize_flow * H + internal_w = resize_flow * W + if internal_h % downscale_factor or internal_w % downscale_factor: + logging.warning( + "VOIDWarpedNoise: internal noise size %dx%d is not divisible by " + "downscale_factor %d; output noise may have artifacts.", + internal_h, internal_w, downscale_factor, + ) + + with torch.no_grad(): + warper = NoiseWarper( + c=noise_channels, h=internal_h, w=internal_w, device=device, + ) + down_h = warper.h // downscale_factor + down_w = warper.w // downscale_factor + output = torch.empty( + (T, down_h, down_w, noise_channels), dtype=torch.float32, device=device, + ) + + def downscale(noise_chw): + # Area-pool to 1/downscale_factor then multiply by downscale_factor + # to adjust std (sqrt of pool area == downscale_factor for a + # square pool). + down = _torch_resize_chw(noise_chw, 1.0 / downscale_factor, "area", copy=False) + return down * downscale_factor + + output[0] = downscale(warper.noise).permute(1, 2, 0) + + prev = frames[0] + for i in range(1, T): + curr = frames[i] + flow = raft(prev, curr).to(device) + warper(flow[0], flow[1]) + output[i] = downscale(warper.noise).permute(1, 2, 0) + prev = curr + + return output diff --git a/folder_paths.py b/folder_paths.py index 039f72636..98d3b1880 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -54,6 +54,8 @@ folder_names_and_paths["audio_encoders"] = ([os.path.join(models_dir, "audio_enc folder_names_and_paths["frame_interpolation"] = ([os.path.join(models_dir, "frame_interpolation")], supported_pt_extensions) +folder_names_and_paths["optical_flow"] = ([os.path.join(models_dir, "optical_flow")], supported_pt_extensions) + output_directory = os.path.join(base_path, "output") temp_directory = os.path.join(base_path, "temp") input_directory = os.path.join(base_path, "input") diff --git a/models/optical_flow/put_optical_flow_models_here b/models/optical_flow/put_optical_flow_models_here new file mode 100644 index 000000000..e69de29bb diff --git a/nodes.py b/nodes.py index cf61d9df0..ad0cbc675 100644 --- a/nodes.py +++ b/nodes.py @@ -958,7 +958,7 @@ class CLIPLoader: @classmethod def INPUT_TYPES(s): return {"required": { "clip_name": (folder_paths.get_filename_list("text_encoders"), ), - "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis", "longcat_image"], ), + "type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2", "wan", "hidream", "chroma", "ace", "omnigen2", "qwen_image", "hunyuan_image", "flux2", "ovis", "longcat_image", "cogvideox"], ), }, "optional": { "device": (["default", "cpu"], {"advanced": True}), @@ -968,7 +968,7 @@ class CLIPLoader: CATEGORY = "advanced/loaders" - DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B" + DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: 
t5 xxl/ clip-g / clip-l\nstable_audio: t5 base\nmochi: t5 xxl\ncogvideox: t5 xxl (226-token padding)\ncosmos: old t5 xxl\nlumina2: gemma 2 2B\nwan: umt5 xxl\n hidream: llama-3.1 (Recommend) or t5\nomnigen2: qwen vl 2.5 3B" def load_clip(self, clip_name, type="stable_diffusion", device="default"): clip_type = getattr(comfy.sd.CLIPType, type.upper(), comfy.sd.CLIPType.STABLE_DIFFUSION) @@ -2430,6 +2430,7 @@ async def init_builtin_extra_nodes(): "nodes_rtdetr.py", "nodes_frame_interpolation.py", "nodes_sam3.py", + "nodes_void.py", ] import_failed = [] From 9c34f5f36a3815af7d21d8b42b0a5776b7406685 Mon Sep 17 00:00:00 2001 From: Comfy Org PR Bot Date: Wed, 6 May 2026 14:22:48 +0900 Subject: [PATCH 10/25] Bump comfyui-frontend-package to 1.43.17 (#13723) Co-authored-by: github-actions[bot] Co-authored-by: Alexander Brown --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e9415f2fd..e7aa92c31 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -comfyui-frontend-package==1.42.15 +comfyui-frontend-package==1.43.17 comfyui-workflow-templates==0.9.69 comfyui-embedded-docs==0.4.4 torch From 6bcd8b96ab4650db6e834dcbb54357ebf72edfe6 Mon Sep 17 00:00:00 2001 From: guill Date: Wed, 6 May 2026 10:08:35 -0700 Subject: [PATCH 11/25] Revert "Fix Content-Disposition header missing 'attachment;' prefix (#13093)" (#13733) This reverts commit ea6880b04b88629b9dd07774298bdffea6923f9b. --- server.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/server.py b/server.py index 0e85635d3..2f3b438bb 100644 --- a/server.py +++ b/server.py @@ -560,7 +560,7 @@ class PromptServer(): buffer.seek(0) return web.Response(body=buffer.read(), content_type=f'image/{image_format}', - headers={"Content-Disposition": f"attachment; filename=\"{filename}\""}) + headers={"Content-Disposition": f"filename=\"{filename}\""}) if 'channel' not in request.rel_url.query: channel = 'rgba' @@ -580,7 +580,7 @@ class PromptServer(): buffer.seek(0) return web.Response(body=buffer.read(), content_type='image/png', - headers={"Content-Disposition": f"attachment; filename=\"{filename}\""}) + headers={"Content-Disposition": f"filename=\"{filename}\""}) elif channel == 'a': with Image.open(file) as img: @@ -597,7 +597,7 @@ class PromptServer(): alpha_buffer.seek(0) return web.Response(body=alpha_buffer.read(), content_type='image/png', - headers={"Content-Disposition": f"attachment; filename=\"{filename}\""}) + headers={"Content-Disposition": f"filename=\"{filename}\""}) else: # Use the content type from asset resolution if available, # otherwise guess from the filename. 
@@ -614,7 +614,7 @@ class PromptServer(): return web.FileResponse( file, headers={ - "Content-Disposition": f"attachment; filename=\"{filename}\"", + "Content-Disposition": f"filename=\"{filename}\"", "Content-Type": content_type } ) From cd8c7a2306be98bf93cd6632384a675afe750a55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Thu, 7 May 2026 05:41:13 +0300 Subject: [PATCH 12/25] Throttle dynamic VRAM prepare logging (#13704) --- comfy/model_patcher.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 7d2d6883f..33bdedfb1 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -26,6 +26,7 @@ import uuid from typing import Callable, Optional import torch +import tqdm import comfy.float import comfy.hooks @@ -1651,7 +1652,11 @@ class ModelPatcherDynamic(ModelPatcher): self.model.model_loaded_weight_memory += casted_buf.numel() * casted_buf.element_size() force_load_stat = f" Force pre-loaded {len(self.backup)} weights: {self.model.model_loaded_weight_memory // 1024} KB." if len(self.backup) > 0 else "" - logging.info(f"Model {self.model.__class__.__name__} prepared for dynamic VRAM loading. {allocated_size // (1024 ** 2)}MB Staged. {num_patches} patches attached.{force_load_stat}") + log_key = (self.patches_uuid, allocated_size, num_patches, len(self.backup), self.model.model_loaded_weight_memory) + in_loop = bool(getattr(tqdm.tqdm, "_instances", None)) + level = logging.DEBUG if in_loop and getattr(self, "_last_prepare_log_key", None) == log_key else logging.INFO + self._last_prepare_log_key = log_key + logging.log(level, f"Model {self.model.__class__.__name__} prepared for dynamic VRAM loading. {allocated_size // (1024 ** 2)}MB Staged. {num_patches} patches attached.{force_load_stat}") self.model.device = device_to self.model.current_weight_patches_uuid = self.patches_uuid From e35348aa53563cabdcd9e5f67d0cb77b5259c903 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Wed, 6 May 2026 19:51:01 -0700 Subject: [PATCH 13/25] Add .comfy_environment to portable. (#13746) --- .github/workflows/stable-release.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml index f501b7b31..bc64ed74d 100644 --- a/.github/workflows/stable-release.yml +++ b/.github/workflows/stable-release.yml @@ -145,6 +145,8 @@ jobs: cp -r ComfyUI/.ci/windows_${{ inputs.rel_name }}_base_files/* ./ cp ../update_comfyui_and_python_dependencies.bat ./update/ + echo 'local-portable' > ComfyUI/.comfy_environment + cd .. 
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable From 1b25f1289e6f48081b727083425791876ed0f39b Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 7 May 2026 09:45:59 +0300 Subject: [PATCH 14/25] [Partner Nodes] add grok-imagine-image-quality model (#13725) * feat(api-nodes): add grok-imagine-image-quality model Signed-off-by: bigcat88 * fixed price badges Signed-off-by: bigcat88 * fix: adjust price badges Signed-off-by: bigcat88 --------- Signed-off-by: bigcat88 Co-authored-by: Jedrzej Kosinski --- comfy_api_nodes/nodes_grok.py | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/comfy_api_nodes/nodes_grok.py b/comfy_api_nodes/nodes_grok.py index f42d84616..dd5d7e249 100644 --- a/comfy_api_nodes/nodes_grok.py +++ b/comfy_api_nodes/nodes_grok.py @@ -54,7 +54,12 @@ class GrokImageNode(IO.ComfyNode): inputs=[ IO.Combo.Input( "model", - options=["grok-imagine-image-pro", "grok-imagine-image", "grok-imagine-image-beta"], + options=[ + "grok-imagine-image-quality", + "grok-imagine-image-pro", + "grok-imagine-image", + "grok-imagine-image-beta", + ], ), IO.String.Input( "prompt", @@ -111,10 +116,12 @@ class GrokImageNode(IO.ComfyNode): ], is_api_node=True, price_badge=IO.PriceBadge( - depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images"]), + depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images", "resolution"]), expr=""" ( - $rate := $contains(widgets.model, "pro") ? 0.07 : 0.02; + $rate := widgets.model = "grok-imagine-image-quality" + ? (widgets.resolution = "1k" ? 0.05 : 0.07) + : ($contains(widgets.model, "pro") ? 0.07 : 0.02); {"type":"usd","usd": $rate * widgets.number_of_images} ) """, @@ -167,7 +174,12 @@ class GrokImageEditNode(IO.ComfyNode): inputs=[ IO.Combo.Input( "model", - options=["grok-imagine-image-pro", "grok-imagine-image", "grok-imagine-image-beta"], + options=[ + "grok-imagine-image-quality", + "grok-imagine-image-pro", + "grok-imagine-image", + "grok-imagine-image-beta", + ], ), IO.Image.Input("image", display_name="images"), IO.String.Input( @@ -228,11 +240,19 @@ class GrokImageEditNode(IO.ComfyNode): ], is_api_node=True, price_badge=IO.PriceBadge( - depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images"]), + depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images", "resolution"]), expr=""" ( - $rate := $contains(widgets.model, "pro") ? 0.07 : 0.02; - {"type":"usd","usd": 0.002 + $rate * widgets.number_of_images} + $isQualityModel := widgets.model = "grok-imagine-image-quality"; + $isPro := $contains(widgets.model, "pro"); + $rate := $isQualityModel + ? (widgets.resolution = "1k" ? 0.05 : 0.07) + : ($isPro ? 0.07 : 0.02); + $base := $isQualityModel ? 0.01 : 0.002; + $output := $rate * widgets.number_of_images; + $isPro + ? 
{"type":"usd","usd": $base + $output} + : {"type":"range_usd","min_usd": $base + $output, "max_usd": 3 * $base + $output} ) """, ), From 25757a53c93281e8e2462ced8795373f09e675bf Mon Sep 17 00:00:00 2001 From: "Daxiong (Lin)" Date: Thu, 7 May 2026 16:28:18 +0900 Subject: [PATCH 15/25] chore: update workflow templates to v0.9.72 (#13732) Co-authored-by: Jedrzej Kosinski --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e7aa92c31..5c7ff76be 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.43.17 -comfyui-workflow-templates==0.9.69 +comfyui-workflow-templates==0.9.72 comfyui-embedded-docs==0.4.4 torch torchsde From c945a433ae09423f7a2a6e9631538e55b9375f78 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 7 May 2026 21:55:09 +0300 Subject: [PATCH 16/25] fix(api-nodes): fixed price badge for Kling V3 model in the Motion Control node (#13790) Signed-off-by: bigcat88 --- comfy_api_nodes/nodes_kling.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/comfy_api_nodes/nodes_kling.py b/comfy_api_nodes/nodes_kling.py index efd58fac3..7586f1816 100644 --- a/comfy_api_nodes/nodes_kling.py +++ b/comfy_api_nodes/nodes_kling.py @@ -2787,11 +2787,15 @@ class MotionControl(IO.ComfyNode): ], is_api_node=True, price_badge=IO.PriceBadge( - depends_on=IO.PriceBadgeDepends(widgets=["mode"]), + depends_on=IO.PriceBadgeDepends(widgets=["mode", "model"]), expr=""" ( - $prices := {"std": 0.07, "pro": 0.112}; - {"type":"usd","usd": $lookup($prices, widgets.mode), "format":{"suffix":"/second"}} + $prices := { + "kling-v3": {"std": 0.126, "pro": 0.168}, + "kling-v2-6": {"std": 0.07, "pro": 0.112} + }; + $modelPrices := $lookup($prices, widgets.model); + {"type":"usd","usd": $lookup($modelPrices, widgets.mode), "format":{"suffix":"/second"}} ) """, ), From c011fb520c79b9dfbe7f885d613771774f746eef Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Thu, 7 May 2026 22:19:44 +0300 Subject: [PATCH 17/25] [Partner Nodes] new NanoBanana2 node with DynamicCombo/Autogrow (#13753) * feat(api-nodes): new NanoBanana2 node with DynamicCombo/Autogrow Signed-off-by: bigcat88 * feat: improved status text on uploading Signed-off-by: bigcat88 * feat: improved status text on uploading (2) Signed-off-by: bigcat88 --------- Signed-off-by: bigcat88 --- comfy_api_nodes/nodes_gemini.py | 242 +++++++++++++++++++++++++++++--- 1 file changed, 222 insertions(+), 20 deletions(-) diff --git a/comfy_api_nodes/nodes_gemini.py b/comfy_api_nodes/nodes_gemini.py index 2b77a022e..d18c958a8 100644 --- a/comfy_api_nodes/nodes_gemini.py +++ b/comfy_api_nodes/nodes_gemini.py @@ -83,13 +83,16 @@ class GeminiImageModel(str, Enum): async def create_image_parts( cls: type[IO.ComfyNode], - images: Input.Image, + images: Input.Image | list[Input.Image], image_limit: int = 0, ) -> list[GeminiPart]: image_parts: list[GeminiPart] = [] if image_limit < 0: raise ValueError("image_limit must be greater than or equal to 0 when creating Gemini image parts.") - total_images = get_number_of_images(images) + + # Accept either a single (possibly-batched) tensor or a list of them; share URL budget across all. 
+ images_list: list[Input.Image] = images if isinstance(images, list) else [images]
+ total_images = sum(get_number_of_images(img) for img in images_list)
 if total_images <= 0:
 raise ValueError("No images provided to create_image_parts; at least one image is required.")
@@ -98,10 +101,18 @@ async def create_image_parts(
 # Number of images we'll send as URLs (fileData)
 num_url_images = min(effective_max, 10) # Vertex API max number of image links
+ upload_kwargs: dict = {"wait_label": "Uploading reference images"}
+ if effective_max > num_url_images:
+ # Split path (e.g. 11+ images): suppress per-image counter to avoid a confusing dual-fraction label.
+ upload_kwargs = {
+ "wait_label": f"Uploading reference images ({num_url_images}+)",
+ "show_batch_index": False,
+ }
 reference_images_urls = await upload_images_to_comfyapi(
 cls,
- images,
+ images_list,
 max_images=num_url_images,
+ **upload_kwargs,
 )
 for reference_image_url in reference_images_urls:
 image_parts.append(
@@ -112,15 +123,22 @@ async def create_image_parts(
 )
 )
 )
- for idx in range(num_url_images, effective_max):
- image_parts.append(
- GeminiPart(
- inlineData=GeminiInlineData(
- mimeType=GeminiMimeType.image_png,
- data=tensor_to_base64_string(images[idx]),
+ if effective_max > num_url_images:
+ flat: list[torch.Tensor] = []
+ for tensor in images_list:
+ if len(tensor.shape) == 4:
+ flat.extend(tensor[i] for i in range(tensor.shape[0]))
+ else:
+ flat.append(tensor)
+ for idx in range(num_url_images, effective_max):
+ image_parts.append(
+ GeminiPart(
+ inlineData=GeminiInlineData(
+ mimeType=GeminiMimeType.image_png,
+ data=tensor_to_base64_string(flat[idx]),
+ )
 )
 )
- )
 return image_parts
@@ -891,10 +909,6 @@ class GeminiNanoBanana2(IO.ComfyNode):
 "9:16",
 "16:9",
 "21:9",
- # "1:4",
- # "4:1",
- # "8:1",
- # "1:8",
 ],
 default="auto",
 tooltip="If set to 'auto', matches your input image's aspect ratio; "
@@ -902,12 +916,7 @@ class GeminiNanoBanana2(IO.ComfyNode):
 ),
 IO.Combo.Input(
 "resolution",
- options=[
- # "512px",
- "1K",
- "2K",
- "4K",
- ],
+ options=["1K", "2K", "4K"],
 tooltip="Target output resolution. For 2K/4K the native Gemini upscaler is used.",
 ),
 IO.Combo.Input(
@@ -956,6 +965,7 @@ class GeminiNanoBanana2(IO.ComfyNode):
 ],
 is_api_node=True,
 price_badge=GEMINI_IMAGE_2_PRICE_BADGE,
+ is_deprecated=True,
 )
 
 @classmethod
@@ -1016,6 +1026,197 @@ class GeminiNanoBanana2(IO.ComfyNode):
 )
+
+
+def _nano_banana_2_v2_model_inputs():
+ return [
+ IO.Combo.Input(
+ "aspect_ratio",
+ options=[
+ "auto",
+ "1:1",
+ "2:3",
+ "3:2",
+ "3:4",
+ "4:3",
+ "4:5",
+ "5:4",
+ "9:16",
+ "16:9",
+ "21:9",
+ "1:4",
+ "4:1",
+ "8:1",
+ "1:8",
+ ],
+ default="auto",
+ tooltip="If set to 'auto', matches your input image's aspect ratio; "
+ "if no image is provided, a 16:9 image is usually generated.",
+ ),
+ IO.Combo.Input(
+ "resolution",
+ options=["1K", "2K", "4K"],
+ tooltip="Target output resolution. For 2K/4K the native Gemini upscaler is used.",
+ ),
+ IO.Combo.Input(
+ "thinking_level",
+ options=["MINIMAL", "HIGH"],
+ ),
+ IO.Autogrow.Input(
+ "images",
+ template=IO.Autogrow.TemplateNames(
+ IO.Image.Input("image"),
+ names=[f"image_{i}" for i in range(1, 15)],
+ min=0,
+ ),
+ tooltip="Optional reference image(s). Up to 14 images total.",
+ ),
+ IO.Custom("GEMINI_INPUT_FILES").Input(
+ "files",
+ optional=True,
+ tooltip="Optional file(s) to use as context for the model. 
" + "Accepts inputs from the Gemini Generate Content Input Files node.", + ), + ] + + +class GeminiNanoBanana2V2(IO.ComfyNode): + + @classmethod + def define_schema(cls): + return IO.Schema( + node_id="GeminiNanoBanana2V2", + display_name="Nano Banana 2", + category="api node/image/Gemini", + description="Generate or edit images synchronously via Google Vertex API.", + inputs=[ + IO.String.Input( + "prompt", + multiline=True, + tooltip="Text prompt describing the image to generate or the edits to apply. " + "Include any constraints, styles, or details the model should follow.", + default="", + ), + IO.DynamicCombo.Input( + "model", + options=[ + IO.DynamicCombo.Option( + "Nano Banana 2 (Gemini 3.1 Flash Image)", + _nano_banana_2_v2_model_inputs(), + ), + ], + ), + IO.Int.Input( + "seed", + default=42, + min=0, + max=0xFFFFFFFFFFFFFFFF, + control_after_generate=True, + tooltip="When the seed is fixed to a specific value, the model makes a best effort to provide " + "the same response for repeated requests. Deterministic output isn't guaranteed. " + "Also, changing the model or parameter settings, such as the temperature, " + "can cause variations in the response even when you use the same seed value. " + "By default, a random seed value is used.", + ), + IO.Combo.Input( + "response_modalities", + options=["IMAGE", "IMAGE+TEXT"], + advanced=True, + ), + IO.String.Input( + "system_prompt", + multiline=True, + default=GEMINI_IMAGE_SYS_PROMPT, + optional=True, + tooltip="Foundational instructions that dictate an AI's behavior.", + advanced=True, + ), + ], + outputs=[ + IO.Image.Output(), + IO.String.Output(), + IO.Image.Output( + display_name="thought_image", + tooltip="First image from the model's thinking process. " + "Only available with thinking_level HIGH and IMAGE+TEXT modality.", + ), + ], + hidden=[ + IO.Hidden.auth_token_comfy_org, + IO.Hidden.api_key_comfy_org, + IO.Hidden.unique_id, + ], + is_api_node=True, + price_badge=IO.PriceBadge( + depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution"]), + expr=""" + ( + $r := $lookup(widgets, "model.resolution"); + $prices := {"1k": 0.0696, "2k": 0.1014, "4k": 0.154}; + {"type":"usd","usd": $lookup($prices, $r), "format":{"suffix":"/Image","approximate":true}} + ) + """, + ), + ) + + @classmethod + async def execute( + cls, + prompt: str, + model: dict, + seed: int, + response_modalities: str, + system_prompt: str = "", + ) -> IO.NodeOutput: + validate_string(prompt, strip_whitespace=True, min_length=1) + model_choice = model["model"] + if model_choice == "Nano Banana 2 (Gemini 3.1 Flash Image)": + model_id = "gemini-3.1-flash-image-preview" + else: + model_id = model_choice + + images = model.get("images") or {} + parts: list[GeminiPart] = [GeminiPart(text=prompt)] + if images: + image_tensors: list[Input.Image] = [t for t in images.values() if t is not None] + if image_tensors: + if sum(get_number_of_images(t) for t in image_tensors) > 14: + raise ValueError("The current maximum number of supported images is 14.") + parts.extend(await create_image_parts(cls, image_tensors)) + files = model.get("files") + if files is not None: + parts.extend(files) + + image_config = GeminiImageConfig(imageSize=model["resolution"]) + if model["aspect_ratio"] != "auto": + image_config.aspectRatio = model["aspect_ratio"] + + gemini_system_prompt = None + if system_prompt: + gemini_system_prompt = GeminiSystemInstructionContent(parts=[GeminiTextPart(text=system_prompt)], role=None) + + response = await sync_op( + cls, + 
ApiEndpoint(path=f"/proxy/vertexai/gemini/{model_id}", method="POST"), + data=GeminiImageGenerateContentRequest( + contents=[ + GeminiContent(role=GeminiRole.user, parts=parts), + ], + generationConfig=GeminiImageGenerationConfig( + responseModalities=(["IMAGE"] if response_modalities == "IMAGE" else ["TEXT", "IMAGE"]), + imageConfig=image_config, + thinkingConfig=GeminiThinkingConfig(thinkingLevel=model["thinking_level"]), + ), + systemInstruction=gemini_system_prompt, + ), + response_model=GeminiGenerateContentResponse, + price_extractor=calculate_tokens_price, + ) + return IO.NodeOutput( + await get_image_from_response(response), + get_text_from_response(response), + await get_image_from_response(response, thought=True), + ) + + class GeminiExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[IO.ComfyNode]]: @@ -1024,6 +1225,7 @@ class GeminiExtension(ComfyExtension): GeminiImage, GeminiImage2, GeminiNanoBanana2, + GeminiNanoBanana2V2, GeminiInputFiles, ] From 8dc3f3f2094121c0a013e21d89136ebc331d2974 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Fri, 8 May 2026 03:18:28 +0300 Subject: [PATCH 18/25] Improve SAM3 large input handling (#13767) --- comfy/ldm/sam3/detector.py | 9 ++++--- comfy/ldm/sam3/tracker.py | 49 +++++++++++++++++++++++++------------- comfy_extras/nodes_sam3.py | 24 +++++++++++-------- 3 files changed, 53 insertions(+), 29 deletions(-) diff --git a/comfy/ldm/sam3/detector.py b/comfy/ldm/sam3/detector.py index 12d3a01ab..23a972ac7 100644 --- a/comfy/ldm/sam3/detector.py +++ b/comfy/ldm/sam3/detector.py @@ -561,7 +561,8 @@ class SAM3Model(nn.Module): return high_res_masks def forward_video(self, images, initial_masks, pbar=None, text_prompts=None, - new_det_thresh=0.5, max_objects=0, detect_interval=1): + new_det_thresh=0.5, max_objects=0, detect_interval=1, + target_device=None, target_dtype=None): """Track video with optional per-frame text-prompted detection.""" bb = self.detector.backbone["vision_backbone"] @@ -589,8 +590,10 @@ class SAM3Model(nn.Module): return self.tracker.track_video_with_detection( backbone_fn, images, initial_masks, detect_fn, new_det_thresh=new_det_thresh, max_objects=max_objects, - detect_interval=detect_interval, backbone_obj=bb, pbar=pbar) + detect_interval=detect_interval, backbone_obj=bb, pbar=pbar, + target_device=target_device, target_dtype=target_dtype) # SAM3 (non-multiplex) — no detection support, requires initial masks if initial_masks is None: raise ValueError("SAM3 (non-multiplex) requires initial_mask for video tracking") - return self.tracker.track_video(backbone_fn, images, initial_masks, pbar=pbar, backbone_obj=bb) + return self.tracker.track_video(backbone_fn, images, initial_masks, pbar=pbar, backbone_obj=bb, + target_device=target_device, target_dtype=target_dtype) diff --git a/comfy/ldm/sam3/tracker.py b/comfy/ldm/sam3/tracker.py index 8f7481003..8456e90a6 100644 --- a/comfy/ldm/sam3/tracker.py +++ b/comfy/ldm/sam3/tracker.py @@ -200,8 +200,13 @@ def pack_masks(masks): def unpack_masks(packed): """Unpack bit-packed [*, H, W//8] uint8 to bool [*, H, W*8].""" - shifts = torch.arange(8, device=packed.device) - return ((packed.unsqueeze(-1) >> shifts) & 1).view(*packed.shape[:-1], -1).bool() + bits = torch.tensor([1, 2, 4, 8, 16, 32, 64, 128], dtype=torch.uint8, device=packed.device) + return (packed.unsqueeze(-1) & bits).bool().view(*packed.shape[:-1], -1) + + +def _prep_frame(images, idx, device, dt, size): + """Slice CPU full-res 
frames, transfer to GPU in target dtype, and resize to (size, size).""" + return comfy.utils.common_upscale(images[idx].to(device=device, dtype=dt), size, size, "bicubic", crop="disabled") def _compute_backbone(backbone_fn, frame, frame_idx=None): @@ -1078,16 +1083,19 @@ class SAM3Tracker(nn.Module): # SAM3: drop last FPN level return vision_feats[:-1], vision_pos[:-1], feat_sizes[:-1] - def _track_single_object(self, backbone_fn, images, initial_mask, pbar=None): + def _track_single_object(self, backbone_fn, images, initial_mask, pbar=None, + target_device=None, target_dtype=None): """Track one object, computing backbone per frame to save VRAM.""" N = images.shape[0] - device, dt = images.device, images.dtype + device = target_device if target_device is not None else images.device + dt = target_dtype if target_dtype is not None else images.dtype + size = self.image_size output_dict = {"cond_frame_outputs": {}, "non_cond_frame_outputs": {}} all_masks = [] for frame_idx in tqdm(range(N), desc="tracking"): vision_feats, vision_pos, feat_sizes = self._compute_backbone_frame( - backbone_fn, images[frame_idx:frame_idx + 1], frame_idx=frame_idx) + backbone_fn, _prep_frame(images, slice(frame_idx, frame_idx + 1), device, dt, size), frame_idx=frame_idx) mask_input = None if frame_idx == 0: mask_input = F.interpolate(initial_mask.to(device=device, dtype=dt), @@ -1114,12 +1122,13 @@ class SAM3Tracker(nn.Module): return torch.cat(all_masks, dim=0) # [N, 1, H, W] - def track_video(self, backbone_fn, images, initial_masks, pbar=None, **kwargs): + def track_video(self, backbone_fn, images, initial_masks, pbar=None, + target_device=None, target_dtype=None, **kwargs): """Track one or more objects across video frames. Args: backbone_fn: callable that returns (sam2_features, sam2_positions, trunk_out) for a frame - images: [N, 3, 1008, 1008] video frames + images: [N, 3, H, W] CPU full-res video frames (resized per-frame to self.image_size) initial_masks: [N_obj, 1, H, W] binary masks for first frame (one per object) pbar: optional progress bar @@ -1130,7 +1139,8 @@ class SAM3Tracker(nn.Module): per_object = [] for obj_idx in range(N_obj): obj_masks = self._track_single_object( - backbone_fn, images, initial_masks[obj_idx:obj_idx + 1], pbar=pbar) + backbone_fn, images, initial_masks[obj_idx:obj_idx + 1], pbar=pbar, + target_device=target_device, target_dtype=target_dtype) per_object.append(obj_masks) return torch.cat(per_object, dim=1) # [N, N_obj, H, W] @@ -1632,11 +1642,18 @@ class SAM31Tracker(nn.Module): return det_scores[new_dets].tolist() if det_scores is not None else [0.0] * new_dets.sum().item() return [] + INTERNAL_MAX_OBJECTS = 64 # Hard ceiling on accumulated tracks; max_objects=0 or any value above this is clamped here. + def track_video_with_detection(self, backbone_fn, images, initial_masks, detect_fn=None, new_det_thresh=0.5, max_objects=0, detect_interval=1, - backbone_obj=None, pbar=None): + backbone_obj=None, pbar=None, target_device=None, target_dtype=None): """Track with optional per-frame detection. 
Returns [N, max_N_obj, H, W] mask logits.""" - N, device, dt = images.shape[0], images.device, images.dtype + if max_objects <= 0 or max_objects > self.INTERNAL_MAX_OBJECTS: + max_objects = self.INTERNAL_MAX_OBJECTS + N = images.shape[0] + device = target_device if target_device is not None else images.device + dt = target_dtype if target_dtype is not None else images.dtype + size = self.image_size output_dict = {"cond_frame_outputs": {}, "non_cond_frame_outputs": {}} all_masks = [] idev = comfy.model_management.intermediate_device() @@ -1656,7 +1673,7 @@ class SAM31Tracker(nn.Module): prefetch = True except RuntimeError: pass - cur_bb = self._compute_backbone_frame(backbone_fn, images[0:1], frame_idx=0) + cur_bb = self._compute_backbone_frame(backbone_fn, _prep_frame(images, slice(0, 1), device, dt, size), frame_idx=0) for frame_idx in tqdm(range(N), desc="tracking"): vision_feats, vision_pos, feat_sizes, high_res_prop, trunk_out = cur_bb @@ -1666,7 +1683,7 @@ class SAM31Tracker(nn.Module): backbone_stream.wait_stream(torch.cuda.current_stream(device)) with torch.cuda.stream(backbone_stream): next_bb = self._compute_backbone_frame( - backbone_fn, images[frame_idx + 1:frame_idx + 2], frame_idx=frame_idx + 1) + backbone_fn, _prep_frame(images, slice(frame_idx + 1, frame_idx + 2), device, dt, size), frame_idx=frame_idx + 1) # Per-frame detection with NMS (skip if no detect_fn, or interval/max not met) det_masks = torch.empty(0, device=device) @@ -1687,7 +1704,7 @@ class SAM31Tracker(nn.Module): current_out = self._condition_with_masks( initial_masks.to(device=device, dtype=dt), frame_idx, vision_feats, vision_pos, feat_sizes, high_res_prop, output_dict, N, mux_state, backbone_obj, - images[frame_idx:frame_idx + 1], trunk_out) + _prep_frame(images, slice(frame_idx, frame_idx + 1), device, dt, size), trunk_out) last_occluded = torch.full((mux_state.total_valid_entries,), -1, device=device, dtype=torch.long) obj_scores = [1.0] * mux_state.total_valid_entries if keep_alive is not None: @@ -1702,7 +1719,7 @@ class SAM31Tracker(nn.Module): current_out = self._condition_with_masks( det_masks, frame_idx, vision_feats, vision_pos, feat_sizes, high_res_prop, output_dict, N, mux_state, backbone_obj, - images[frame_idx:frame_idx + 1], trunk_out, threshold=0.0) + _prep_frame(images, slice(frame_idx, frame_idx + 1), device, dt, size), trunk_out, threshold=0.0) last_occluded = torch.full((mux_state.total_valid_entries,), -1, device=device, dtype=torch.long) obj_scores = det_scores[:mux_state.total_valid_entries].tolist() if keep_alive is not None: @@ -1718,7 +1735,7 @@ class SAM31Tracker(nn.Module): torch.cuda.current_stream(device).wait_stream(backbone_stream) cur_bb = next_bb else: - cur_bb = self._compute_backbone_frame(backbone_fn, images[frame_idx + 1:frame_idx + 2], frame_idx=frame_idx + 1) + cur_bb = self._compute_backbone_frame(backbone_fn, _prep_frame(images, slice(frame_idx + 1, frame_idx + 2), device, dt, size), frame_idx=frame_idx + 1) continue else: N_obj = mux_state.total_valid_entries @@ -1768,7 +1785,7 @@ class SAM31Tracker(nn.Module): torch.cuda.current_stream(device).wait_stream(backbone_stream) cur_bb = next_bb else: - cur_bb = self._compute_backbone_frame(backbone_fn, images[frame_idx + 1:frame_idx + 2], frame_idx=frame_idx + 1) + cur_bb = self._compute_backbone_frame(backbone_fn, _prep_frame(images, slice(frame_idx + 1, frame_idx + 2), device, dt, size), frame_idx=frame_idx + 1) if not all_masks or all(m is None for m in all_masks): return {"packed_masks": None, "n_frames": N, 
"scores": []} diff --git a/comfy_extras/nodes_sam3.py b/comfy_extras/nodes_sam3.py index 5cf92ccb3..c460506bf 100644 --- a/comfy_extras/nodes_sam3.py +++ b/comfy_extras/nodes_sam3.py @@ -272,8 +272,8 @@ class SAM3_VideoTrack(io.ComfyNode): io.Model.Input("model", display_name="model"), io.Mask.Input("initial_mask", display_name="initial_mask", optional=True, tooltip="Mask(s) for the first frame to track (one per object)"), io.Conditioning.Input("conditioning", display_name="conditioning", optional=True, tooltip="Text conditioning for detecting new objects during tracking"), - io.Float.Input("detection_threshold", display_name="detection_threshold", default=0.5, min=0.0, max=1.0, step=0.01, tooltip="Score threshold for text-prompted detection"), - io.Int.Input("max_objects", display_name="max_objects", default=0, min=0, tooltip="Max tracked objects (0=unlimited). Initial masks count toward this limit."), + io.Float.Input("detection_threshold", display_name="detection_threshold", default=0.5, min=0.0, max=1.0, step=0.01, tooltip="Score threshold for text-prompted detection."), + io.Int.Input("max_objects", display_name="max_objects", default=4, min=0, max=64, tooltip="Max tracked objects. Initial masks count toward this limit. 0 uses the internal cap of 64."), io.Int.Input("detect_interval", display_name="detect_interval", default=1, min=1, tooltip="Run detection every N frames (1=every frame). Higher values save compute."), ], outputs=[ @@ -290,8 +290,7 @@ class SAM3_VideoTrack(io.ComfyNode): dtype = model.model.get_dtype() sam3_model = model.model.diffusion_model - frames = images[..., :3].movedim(-1, 1) - frames_in = comfy.utils.common_upscale(frames, 1008, 1008, "bilinear", crop="disabled").to(device=device, dtype=dtype) + frames_in = images[..., :3].movedim(-1, 1) init_masks = None if initial_mask is not None: @@ -308,7 +307,7 @@ class SAM3_VideoTrack(io.ComfyNode): result = sam3_model.forward_video( images=frames_in, initial_masks=init_masks, pbar=pbar, text_prompts=text_prompts, new_det_thresh=detection_threshold, max_objects=max_objects, - detect_interval=detect_interval) + detect_interval=detect_interval, target_device=device, target_dtype=dtype) result["orig_size"] = (H, W) return io.NodeOutput(result) @@ -449,14 +448,18 @@ class SAM3_TrackPreview(io.ComfyNode): cx = (bool_masks * grid_x).sum(dim=(-1, -2)) // area has = area > 1 scores = track_data.get("scores", []) + label_scale = max(3, H // 240) # Scale font with resolutio + size_caps = (area.float().sqrt() / 15).clamp_(min=1).long().tolist() #cap per-object so the number doesn't dwarf small masks for obj_idx in range(N_obj): if has[obj_idx]: _cx, _cy = int(cx[obj_idx]), int(cy[obj_idx]) color = cls.COLORS[obj_idx % len(cls.COLORS)] - SAM3_TrackPreview._draw_number_gpu(frame_gpu, obj_idx, _cx, _cy, color) + obj_scale = min(label_scale, size_caps[obj_idx]) + score_scale = max(1, obj_scale * 2 // 3) + SAM3_TrackPreview._draw_number_gpu(frame_gpu, obj_idx, _cx, _cy, color, scale=obj_scale) if obj_idx < len(scores) and scores[obj_idx] < 1.0: SAM3_TrackPreview._draw_number_gpu(frame_gpu, int(scores[obj_idx] * 100), - _cx, _cy + 5 * 3 + 3, color, scale=2) + _cx, _cy + 5 * obj_scale + 3, color, scale=score_scale) frame_cpu.copy_(frame_gpu.clamp_(0, 1).mul_(255).byte()) else: frame_cpu.copy_(frame.clamp_(0, 1).mul_(255).byte()) @@ -507,9 +510,10 @@ class SAM3_TrackToMask(io.ComfyNode): if not indices: return io.NodeOutput(torch.zeros(N, H, W, device=comfy.model_management.intermediate_device())) - selected = packed[:, indices] - binary 
= unpack_masks(selected) # [N, len(indices), Hm, Wm] bool - union = binary.any(dim=1, keepdim=True).float() + union_packed = packed[:, indices[0]].clone() + for i in indices[1:]: + union_packed |= packed[:, i] + union = unpack_masks(union_packed).unsqueeze(1).float() # [N, 1, Hm, Wm] mask_out = F.interpolate(union, size=(H, W), mode="bilinear", align_corners=False)[:, 0] return io.NodeOutput(mask_out) From ef8f25601a8504647caf9c9213a7c41a9f414901 Mon Sep 17 00:00:00 2001 From: Talmaj Date: Fri, 8 May 2026 03:38:36 +0200 Subject: [PATCH 19/25] Add I2V for causal forcing model. (#13719) --- comfy/k_diffusion/sampling.py | 17 +++++++++++ comfy_extras/nodes_ar_video.py | 52 ++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index d33bc7199..c53ac4b2b 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -1859,6 +1859,23 @@ def sample_ar_video(model, x, sigmas, extra_args=None, callback=None, disable=No output = torch.zeros_like(x) s_in = x.new_ones([x.shape[0]]) current_start_frame = 0 + + # I2V: seed KV cache with the initial image latent before the denoising loop + initial_latent = transformer_options.get("ar_config", {}).get("initial_latent", None) + if initial_latent is not None: + initial_latent = inner_model.process_latent_in(initial_latent).to(device=device, dtype=model_dtype) + n_init = initial_latent.shape[2] + output[:, :, :n_init] = initial_latent + + ar_state = {"start_frame": 0, "kv_caches": kv_caches, "crossattn_caches": crossattn_caches} + transformer_options["ar_state"] = ar_state + zero_sigma = sigmas.new_zeros([1]) + _ = model(initial_latent, zero_sigma * s_in, **extra_args) + + current_start_frame = n_init + remaining = lat_t - n_init + num_blocks = -(-remaining // num_frame_per_block) + num_sigma_steps = len(sigmas) - 1 total_real_steps = num_blocks * num_sigma_steps step_count = 0 diff --git a/comfy_extras/nodes_ar_video.py b/comfy_extras/nodes_ar_video.py index 09ee886fd..b36588b14 100644 --- a/comfy_extras/nodes_ar_video.py +++ b/comfy_extras/nodes_ar_video.py @@ -2,6 +2,7 @@ ComfyUI nodes for autoregressive video generation (Causal Forcing, Self-Forcing, etc.). - EmptyARVideoLatent: create 5D [B, C, T, H, W] video latent tensors - SamplerARVideo: SAMPLER for the block-by-block autoregressive denoising loop + - ARVideoI2V: image-to-video conditioning for AR models (seeds KV cache with start image) """ import torch @@ -9,6 +10,7 @@ from typing_extensions import override import comfy.model_management import comfy.samplers +import comfy.utils from comfy_api.latest import ComfyExtension, io @@ -71,12 +73,62 @@ class SamplerARVideo(io.ComfyNode): return io.NodeOutput(comfy.samplers.ksampler("ar_video", extra_options)) +class ARVideoI2V(io.ComfyNode): + """Image-to-video setup for AR video models (Causal Forcing, Self-Forcing). + + VAE-encodes the start image and stores it in the model's transformer_options + so that sample_ar_video can seed the KV cache before denoising. + Uses the same T2V model checkpoint -- no separate I2V architecture needed. 
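+ ``length`` pixel frames map to ``((length - 1) // 4) + 1`` latent frames,
+ matching the zero-filled LATENT allocated in ``execute`` below.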
+ """ + + @classmethod + def define_schema(cls): + return io.Schema( + node_id="ARVideoI2V", + category="conditioning/video_models", + inputs=[ + io.Model.Input("model"), + io.Vae.Input("vae"), + io.Image.Input("start_image"), + io.Int.Input("width", default=832, min=16, max=8192, step=16), + io.Int.Input("height", default=480, min=16, max=8192, step=16), + io.Int.Input("length", default=81, min=1, max=1024, step=4), + io.Int.Input("batch_size", default=1, min=1, max=64), + ], + outputs=[ + io.Model.Output(display_name="MODEL"), + io.Latent.Output(display_name="LATENT"), + ], + ) + + @classmethod + def execute(cls, model, vae, start_image, width, height, length, batch_size) -> io.NodeOutput: + start_image = comfy.utils.common_upscale( + start_image[:1].movedim(-1, 1), width, height, "bilinear", "center" + ).movedim(1, -1) + + initial_latent = vae.encode(start_image[:, :, :, :3]) + + m = model.clone() + to = m.model_options.setdefault("transformer_options", {}) + ar_cfg = to.setdefault("ar_config", {}) + ar_cfg["initial_latent"] = initial_latent + + lat_t = ((length - 1) // 4) + 1 + latent = torch.zeros( + [batch_size, 16, lat_t, height // 8, width // 8], + device=comfy.model_management.intermediate_device(), + ) + return io.NodeOutput(m, {"samples": latent}) + + class ARVideoExtension(ComfyExtension): @override async def get_node_list(self) -> list[type[io.ComfyNode]]: return [ EmptyARVideoLatent, SamplerARVideo, + ARVideoI2V, ] From df7bf1d3dc852365593786497123d92440ac1852 Mon Sep 17 00:00:00 2001 From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com> Date: Thu, 7 May 2026 19:04:30 -0700 Subject: [PATCH 20/25] Update warning message for ComfyUI frontend installation. (#13796) --- app/frontend_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/frontend_management.py b/app/frontend_management.py index f753ef0de..7108bd35a 100644 --- a/app/frontend_management.py +++ b/app/frontend_management.py @@ -27,7 +27,7 @@ def frontend_install_warning_message(): return f""" {get_missing_requirements_message()} -This error is happening because the ComfyUI frontend is no longer shipped as part of the main repo but as a pip package instead. +The ComfyUI frontend is shipped in a pip package so it needs to be updated separately from the ComfyUI code. """.strip() def parse_version(version: str) -> tuple[int, int, int]: From c8673542f762910766691345401e09caef2bc9a6 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Thu, 7 May 2026 19:21:12 -0700 Subject: [PATCH 21/25] fix: make NodeReplaceManager.register() idempotent (#13596) --- app/node_replace_manager.py | 20 ++++- .../app_test/node_replace_manager_test.py | 90 +++++++++++++++++++ 2 files changed, 108 insertions(+), 2 deletions(-) create mode 100644 tests-unit/app_test/node_replace_manager_test.py diff --git a/app/node_replace_manager.py b/app/node_replace_manager.py index d9aab5b22..72e8ac2b1 100644 --- a/app/node_replace_manager.py +++ b/app/node_replace_manager.py @@ -1,5 +1,7 @@ from __future__ import annotations +import logging + from aiohttp import web from typing import TYPE_CHECKING, TypedDict @@ -31,8 +33,22 @@ class NodeReplaceManager: self._replacements: dict[str, list[NodeReplace]] = {} def register(self, node_replace: NodeReplace): - """Register a node replacement mapping.""" - self._replacements.setdefault(node_replace.old_node_id, []).append(node_replace) + """Register a node replacement mapping. 
+ + Idempotent: if a replacement with the same (old_node_id, new_node_id) + is already registered, the duplicate is ignored. This prevents stale + entries from accumulating when custom nodes are reloaded in the same + process (e.g. via ComfyUI-Manager). + """ + existing = self._replacements.setdefault(node_replace.old_node_id, []) + for entry in existing: + if entry.new_node_id == node_replace.new_node_id: + logging.debug( + "Node replacement %s -> %s already registered, ignoring duplicate.", + node_replace.old_node_id, node_replace.new_node_id, + ) + return + existing.append(node_replace) def get_replacement(self, old_node_id: str) -> list[NodeReplace] | None: """Get replacements for an old node ID.""" diff --git a/tests-unit/app_test/node_replace_manager_test.py b/tests-unit/app_test/node_replace_manager_test.py new file mode 100644 index 000000000..8a3fd18bb --- /dev/null +++ b/tests-unit/app_test/node_replace_manager_test.py @@ -0,0 +1,90 @@ +"""Tests for NodeReplaceManager registration behavior.""" +import importlib +import sys +import types + +import pytest + + +@pytest.fixture +def NodeReplaceManager(monkeypatch): + """Provide NodeReplaceManager with `nodes` stubbed. + + `app.node_replace_manager` does `import nodes` at module level, which pulls in + torch + the full ComfyUI graph. register() doesn't actually need it, so we + stub `nodes` per-test (via monkeypatch so it's torn down) and reload the + module so it picks up the stub instead of any cached real import. + """ + fake_nodes = types.ModuleType("nodes") + fake_nodes.NODE_CLASS_MAPPINGS = {} + monkeypatch.setitem(sys.modules, "nodes", fake_nodes) + monkeypatch.delitem(sys.modules, "app.node_replace_manager", raising=False) + module = importlib.import_module("app.node_replace_manager") + yield module.NodeReplaceManager + # Drop the freshly-imported module so the next test (or a later real import + # of `nodes`) starts from a clean slate. 
+ sys.modules.pop("app.node_replace_manager", None) + + +class FakeNodeReplace: + """Lightweight stand-in for comfy_api.latest._io.NodeReplace.""" + def __init__(self, new_node_id, old_node_id, old_widget_ids=None, + input_mapping=None, output_mapping=None): + self.new_node_id = new_node_id + self.old_node_id = old_node_id + self.old_widget_ids = old_widget_ids + self.input_mapping = input_mapping + self.output_mapping = output_mapping + + +def test_register_adds_replacement(NodeReplaceManager): + manager = NodeReplaceManager() + manager.register(FakeNodeReplace(new_node_id="NewNode", old_node_id="OldNode")) + assert manager.has_replacement("OldNode") + assert len(manager.get_replacement("OldNode")) == 1 + + +def test_register_allows_multiple_alternatives_for_same_old_node(NodeReplaceManager): + """Different new_node_ids for the same old_node_id should all be kept.""" + manager = NodeReplaceManager() + manager.register(FakeNodeReplace(new_node_id="AltA", old_node_id="OldNode")) + manager.register(FakeNodeReplace(new_node_id="AltB", old_node_id="OldNode")) + replacements = manager.get_replacement("OldNode") + assert len(replacements) == 2 + assert {r.new_node_id for r in replacements} == {"AltA", "AltB"} + + +def test_register_is_idempotent_for_duplicate_pair(NodeReplaceManager): + """Re-registering the same (old_node_id, new_node_id) should be a no-op.""" + manager = NodeReplaceManager() + manager.register(FakeNodeReplace(new_node_id="NewNode", old_node_id="OldNode")) + manager.register(FakeNodeReplace(new_node_id="NewNode", old_node_id="OldNode")) + manager.register(FakeNodeReplace(new_node_id="NewNode", old_node_id="OldNode")) + assert len(manager.get_replacement("OldNode")) == 1 + + +def test_register_idempotent_preserves_first_registration(NodeReplaceManager): + """First registration wins; later duplicates with different mappings are ignored.""" + manager = NodeReplaceManager() + first = FakeNodeReplace( + new_node_id="NewNode", old_node_id="OldNode", + input_mapping=[{"new_id": "a", "old_id": "x"}], + ) + second = FakeNodeReplace( + new_node_id="NewNode", old_node_id="OldNode", + input_mapping=[{"new_id": "b", "old_id": "y"}], + ) + manager.register(first) + manager.register(second) + replacements = manager.get_replacement("OldNode") + assert len(replacements) == 1 + assert replacements[0] is first + + +def test_register_dedupe_does_not_affect_other_old_nodes(NodeReplaceManager): + manager = NodeReplaceManager() + manager.register(FakeNodeReplace(new_node_id="NewA", old_node_id="OldA")) + manager.register(FakeNodeReplace(new_node_id="NewA", old_node_id="OldA")) + manager.register(FakeNodeReplace(new_node_id="NewB", old_node_id="OldB")) + assert len(manager.get_replacement("OldA")) == 1 + assert len(manager.get_replacement("OldB")) == 1 From 594de378fe1d2e32128338f5cc57864ee1d9d96f Mon Sep 17 00:00:00 2001 From: Alexis Rolland Date: Fri, 8 May 2026 13:02:55 +0800 Subject: [PATCH 22/25] Update nodes categories and display names (CORE-89) (#13786) --- comfy_extras/nodes_advanced_samplers.py | 2 +- comfy_extras/nodes_attention_multiply.py | 8 ++++---- comfy_extras/nodes_audio_encoder.py | 1 + comfy_extras/nodes_camera_trajectory.py | 2 +- comfy_extras/nodes_cond.py | 4 ++-- comfy_extras/nodes_context_windows.py | 2 +- comfy_extras/nodes_custom_sampler.py | 4 ++-- comfy_extras/nodes_differential_diffusion.py | 2 +- comfy_extras/nodes_fresca.py | 2 +- comfy_extras/nodes_hunyuan.py | 4 ++++ comfy_extras/nodes_hunyuan3d.py | 6 ++++-- comfy_extras/nodes_hypernetwork.py | 1 + 
comfy_extras/nodes_lora_extract.py | 2 +- comfy_extras/nodes_lt.py | 3 ++- comfy_extras/nodes_mahiro.py | 2 +- comfy_extras/nodes_math.py | 2 +- comfy_extras/nodes_number_convert.py | 2 +- comfy_extras/nodes_perpneg.py | 7 ++++--- comfy_extras/nodes_photomaker.py | 4 ++-- comfy_extras/nodes_post_processing.py | 4 +++- comfy_extras/nodes_rtdetr.py | 4 ++-- comfy_extras/nodes_sag.py | 2 +- comfy_extras/nodes_sam3.py | 8 ++++---- comfy_extras/nodes_stable_cascade.py | 2 +- comfy_extras/nodes_textgen.py | 4 +++- comfy_extras/nodes_torch_compile.py | 2 +- comfy_extras/nodes_train.py | 2 +- comfy_extras/nodes_video_model.py | 2 +- custom_nodes/websocket_image_save.py | 6 +++++- nodes.py | 14 +++++++------ .../testing-pack/api_test_nodes.py | 4 ++-- .../testing-pack/async_test_nodes.py | 20 +++++++++---------- .../testing-pack/specific_tests.py | 6 +++--- 33 files changed, 80 insertions(+), 60 deletions(-) diff --git a/comfy_extras/nodes_advanced_samplers.py b/comfy_extras/nodes_advanced_samplers.py index 7f716cd76..7e8411fa4 100644 --- a/comfy_extras/nodes_advanced_samplers.py +++ b/comfy_extras/nodes_advanced_samplers.py @@ -92,7 +92,7 @@ class SamplerEulerCFGpp(io.ComfyNode): return io.Schema( node_id="SamplerEulerCFGpp", display_name="SamplerEulerCFG++", - category="_for_testing", # "sampling/custom_sampling/samplers" + category="experimental", # "sampling/custom_sampling/samplers" inputs=[ io.Combo.Input("version", options=["regular", "alternative"], advanced=True), ], diff --git a/comfy_extras/nodes_attention_multiply.py b/comfy_extras/nodes_attention_multiply.py index 060a5c9be..f4ee6a689 100644 --- a/comfy_extras/nodes_attention_multiply.py +++ b/comfy_extras/nodes_attention_multiply.py @@ -25,7 +25,7 @@ class UNetSelfAttentionMultiply(io.ComfyNode): def define_schema(cls) -> io.Schema: return io.Schema( node_id="UNetSelfAttentionMultiply", - category="_for_testing/attention_experiments", + category="experimental/attention_experiments", inputs=[ io.Model.Input("model"), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01, advanced=True), @@ -48,7 +48,7 @@ class UNetCrossAttentionMultiply(io.ComfyNode): def define_schema(cls) -> io.Schema: return io.Schema( node_id="UNetCrossAttentionMultiply", - category="_for_testing/attention_experiments", + category="experimental/attention_experiments", inputs=[ io.Model.Input("model"), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01, advanced=True), @@ -72,7 +72,7 @@ class CLIPAttentionMultiply(io.ComfyNode): return io.Schema( node_id="CLIPAttentionMultiply", search_aliases=["clip attention scale", "text encoder attention"], - category="_for_testing/attention_experiments", + category="experimental/attention_experiments", inputs=[ io.Clip.Input("clip"), io.Float.Input("q", default=1.0, min=0.0, max=10.0, step=0.01, advanced=True), @@ -106,7 +106,7 @@ class UNetTemporalAttentionMultiply(io.ComfyNode): def define_schema(cls) -> io.Schema: return io.Schema( node_id="UNetTemporalAttentionMultiply", - category="_for_testing/attention_experiments", + category="experimental/attention_experiments", inputs=[ io.Model.Input("model"), io.Float.Input("self_structural", default=1.0, min=0.0, max=10.0, step=0.01, advanced=True), diff --git a/comfy_extras/nodes_audio_encoder.py b/comfy_extras/nodes_audio_encoder.py index 13aacd41a..6a85da89b 100644 --- a/comfy_extras/nodes_audio_encoder.py +++ b/comfy_extras/nodes_audio_encoder.py @@ -10,6 +10,7 @@ class AudioEncoderLoader(io.ComfyNode): def define_schema(cls) -> io.Schema: return io.Schema( 
node_id="AudioEncoderLoader", + display_name="Load Audio Encoder", category="loaders", inputs=[ io.Combo.Input( diff --git a/comfy_extras/nodes_camera_trajectory.py b/comfy_extras/nodes_camera_trajectory.py index e7efa29ba..34b78e81b 100644 --- a/comfy_extras/nodes_camera_trajectory.py +++ b/comfy_extras/nodes_camera_trajectory.py @@ -153,7 +153,7 @@ class WanCameraEmbedding(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="WanCameraEmbedding", - category="camera", + category="conditioning/video_models", inputs=[ io.Combo.Input( "camera_pose", diff --git a/comfy_extras/nodes_cond.py b/comfy_extras/nodes_cond.py index 86426a780..b745a43af 100644 --- a/comfy_extras/nodes_cond.py +++ b/comfy_extras/nodes_cond.py @@ -8,7 +8,7 @@ class CLIPTextEncodeControlnet(io.ComfyNode): def define_schema(cls) -> io.Schema: return io.Schema( node_id="CLIPTextEncodeControlnet", - category="_for_testing/conditioning", + category="experimental/conditioning", inputs=[ io.Clip.Input("clip"), io.Conditioning.Input("conditioning"), @@ -35,7 +35,7 @@ class T5TokenizerOptions(io.ComfyNode): def define_schema(cls) -> io.Schema: return io.Schema( node_id="T5TokenizerOptions", - category="_for_testing/conditioning", + category="experimental/conditioning", inputs=[ io.Clip.Input("clip"), io.Int.Input("min_padding", default=0, min=0, max=10000, step=1, advanced=True), diff --git a/comfy_extras/nodes_context_windows.py b/comfy_extras/nodes_context_windows.py index fefc56d26..f7ca833dc 100644 --- a/comfy_extras/nodes_context_windows.py +++ b/comfy_extras/nodes_context_windows.py @@ -10,7 +10,7 @@ class ContextWindowsManualNode(io.ComfyNode): return io.Schema( node_id="ContextWindowsManual", display_name="Context Windows (Manual)", - category="context", + category="model_patches", description="Manually set context windows.", inputs=[ io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 1e957c09b..c67145d2d 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -984,7 +984,7 @@ class AddNoise(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="AddNoise", - category="_for_testing/custom_sampling/noise", + category="experimental/custom_sampling/noise", is_experimental=True, inputs=[ io.Model.Input("model"), @@ -1034,7 +1034,7 @@ class ManualSigmas(io.ComfyNode): return io.Schema( node_id="ManualSigmas", search_aliases=["custom noise schedule", "define sigmas"], - category="_for_testing/custom_sampling", + category="experimental/custom_sampling", is_experimental=True, inputs=[ io.String.Input("sigmas", default="1, 0.5", multiline=False) diff --git a/comfy_extras/nodes_differential_diffusion.py b/comfy_extras/nodes_differential_diffusion.py index 34ffb9a89..4fa61ad0e 100644 --- a/comfy_extras/nodes_differential_diffusion.py +++ b/comfy_extras/nodes_differential_diffusion.py @@ -13,7 +13,7 @@ class DifferentialDiffusion(io.ComfyNode): node_id="DifferentialDiffusion", search_aliases=["inpaint gradient", "variable denoise strength"], display_name="Differential Diffusion", - category="_for_testing", + category="experimental", inputs=[ io.Model.Input("model"), io.Float.Input( diff --git a/comfy_extras/nodes_fresca.py b/comfy_extras/nodes_fresca.py index eab4f303f..173f42154 100644 --- a/comfy_extras/nodes_fresca.py +++ b/comfy_extras/nodes_fresca.py @@ -60,7 +60,7 @@ class FreSca(io.ComfyNode): node_id="FreSca", 
search_aliases=["frequency guidance"], display_name="FreSca", - category="_for_testing", + category="experimental", description="Applies frequency-dependent scaling to the guidance", inputs=[ io.Model.Input("model"), diff --git a/comfy_extras/nodes_hunyuan.py b/comfy_extras/nodes_hunyuan.py index 4ea93a499..9e4873be5 100644 --- a/comfy_extras/nodes_hunyuan.py +++ b/comfy_extras/nodes_hunyuan.py @@ -131,6 +131,8 @@ class HunyuanVideo15SuperResolution(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="HunyuanVideo15SuperResolution", + display_name="Hunyuan Video 1.5 Super Resolution", + category="conditioning/video_models", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), @@ -381,6 +383,8 @@ class HunyuanRefinerLatent(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="HunyuanRefinerLatent", + display_name="Hunyuan Latent Refiner", + category="conditioning/video_models", inputs=[ io.Conditioning.Input("positive"), io.Conditioning.Input("negative"), diff --git a/comfy_extras/nodes_hunyuan3d.py b/comfy_extras/nodes_hunyuan3d.py index fa55ead59..bf18ecb88 100644 --- a/comfy_extras/nodes_hunyuan3d.py +++ b/comfy_extras/nodes_hunyuan3d.py @@ -40,7 +40,7 @@ class Hunyuan3Dv2Conditioning(IO.ComfyNode): def define_schema(cls): return IO.Schema( node_id="Hunyuan3Dv2Conditioning", - category="conditioning/video_models", + category="conditioning/3d_models", inputs=[ IO.ClipVisionOutput.Input("clip_vision_output"), ], @@ -65,7 +65,7 @@ class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode): def define_schema(cls): return IO.Schema( node_id="Hunyuan3Dv2ConditioningMultiView", - category="conditioning/video_models", + category="conditioning/3d_models", inputs=[ IO.ClipVisionOutput.Input("front", optional=True), IO.ClipVisionOutput.Input("left", optional=True), @@ -424,6 +424,7 @@ class VoxelToMeshBasic(IO.ComfyNode): def define_schema(cls): return IO.Schema( node_id="VoxelToMeshBasic", + display_name="Voxel to Mesh (Basic)", category="3d", inputs=[ IO.Voxel.Input("voxel"), @@ -453,6 +454,7 @@ class VoxelToMesh(IO.ComfyNode): def define_schema(cls): return IO.Schema( node_id="VoxelToMesh", + display_name="Voxel to Mesh", category="3d", inputs=[ IO.Voxel.Input("voxel"), diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index 2a6a87a81..44a9c6f97 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -102,6 +102,7 @@ class HypernetworkLoader(IO.ComfyNode): def define_schema(cls): return IO.Schema( node_id="HypernetworkLoader", + display_name="Load Hypernetwork", category="loaders", inputs=[ IO.Model.Input("model"), diff --git a/comfy_extras/nodes_lora_extract.py b/comfy_extras/nodes_lora_extract.py index 975f90f45..bcd249c29 100644 --- a/comfy_extras/nodes_lora_extract.py +++ b/comfy_extras/nodes_lora_extract.py @@ -91,7 +91,7 @@ class LoraSave(io.ComfyNode): node_id="LoraSave", search_aliases=["export lora"], display_name="Extract and Save Lora", - category="_for_testing", + category="experimental", inputs=[ io.String.Input("filename_prefix", default="loras/ComfyUI_extracted_lora"), io.Int.Input("rank", default=8, min=1, max=4096, step=1, advanced=True), diff --git a/comfy_extras/nodes_lt.py b/comfy_extras/nodes_lt.py index 19d8a387f..ab1359fdb 100644 --- a/comfy_extras/nodes_lt.py +++ b/comfy_extras/nodes_lt.py @@ -594,7 +594,8 @@ class LTXVPreprocess(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="LTXVPreprocess", - category="image", + display_name="LTXV 
Preprocess", + category="video/preprocessors", inputs=[ io.Image.Input("image"), io.Int.Input( diff --git a/comfy_extras/nodes_mahiro.py b/comfy_extras/nodes_mahiro.py index a25226e6d..7bd5f6652 100644 --- a/comfy_extras/nodes_mahiro.py +++ b/comfy_extras/nodes_mahiro.py @@ -11,7 +11,7 @@ class Mahiro(io.ComfyNode): return io.Schema( node_id="Mahiro", display_name="Positive-Biased Guidance", - category="_for_testing", + category="experimental", description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.", inputs=[ io.Model.Input("model"), diff --git a/comfy_extras/nodes_math.py b/comfy_extras/nodes_math.py index 6417bacf1..8f6e687d2 100644 --- a/comfy_extras/nodes_math.py +++ b/comfy_extras/nodes_math.py @@ -70,7 +70,7 @@ class MathExpressionNode(io.ComfyNode): return io.Schema( node_id="ComfyMathExpression", display_name="Math Expression", - category="math", + category="logic", search_aliases=[ "expression", "formula", "calculate", "calculator", "eval", "math", diff --git a/comfy_extras/nodes_number_convert.py b/comfy_extras/nodes_number_convert.py index cac7e736d..ab3f2aa8a 100644 --- a/comfy_extras/nodes_number_convert.py +++ b/comfy_extras/nodes_number_convert.py @@ -21,7 +21,7 @@ class NumberConvertNode(io.ComfyNode): return io.Schema( node_id="ComfyNumberConvert", display_name="Number Convert", - category="math", + category="utils", search_aliases=[ "int to float", "float to int", "number convert", "int2float", "float2int", "cast", "parse number", diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py index ed1467de9..a7a72d1bc 100644 --- a/comfy_extras/nodes_perpneg.py +++ b/comfy_extras/nodes_perpneg.py @@ -24,8 +24,8 @@ class PerpNeg(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="PerpNeg", - display_name="Perp-Neg (DEPRECATED by PerpNegGuider)", - category="_for_testing", + display_name="Perp-Neg (DEPRECATED by Perp-Neg Guider)", + category="experimental", inputs=[ io.Model.Input("model"), io.Conditioning.Input("empty_conditioning"), @@ -127,7 +127,8 @@ class PerpNegGuider(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="PerpNegGuider", - category="_for_testing", + display_name="Perp-Neg Guider", + category="experimental", inputs=[ io.Model.Input("model"), io.Conditioning.Input("positive"), diff --git a/comfy_extras/nodes_photomaker.py b/comfy_extras/nodes_photomaker.py index 228183c07..8a2248572 100644 --- a/comfy_extras/nodes_photomaker.py +++ b/comfy_extras/nodes_photomaker.py @@ -123,7 +123,7 @@ class PhotoMakerLoader(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="PhotoMakerLoader", - category="_for_testing/photomaker", + category="experimental/photomaker", inputs=[ io.Combo.Input("photomaker_model_name", options=folder_paths.get_filename_list("photomaker")), ], @@ -149,7 +149,7 @@ class PhotoMakerEncode(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="PhotoMakerEncode", - category="_for_testing/photomaker", + category="experimental/photomaker", inputs=[ io.Photomaker.Input("photomaker"), io.Image.Input("image"), diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index d938a2035..1fa14d2d2 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -116,6 +116,7 @@ class Quantize(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="ImageQuantize", + display_name="Quantize Image", category="image/postprocessing", 
inputs=[ io.Image.Input("image"), @@ -181,6 +182,7 @@ class Sharpen(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="ImageSharpen", + display_name="Sharpen Image", category="image/postprocessing", inputs=[ io.Image.Input("image"), @@ -436,7 +438,7 @@ class ResizeImageMaskNode(io.ComfyNode): node_id="ResizeImageMaskNode", display_name="Resize Image/Mask", description="Resize an image or mask using various scaling methods.", - category="transform", + category="image/transform", search_aliases=["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"], inputs=[ io.MatchType.Input("input", template=template), diff --git a/comfy_extras/nodes_rtdetr.py b/comfy_extras/nodes_rtdetr.py index 7feaf3ab3..a321577c7 100644 --- a/comfy_extras/nodes_rtdetr.py +++ b/comfy_extras/nodes_rtdetr.py @@ -15,7 +15,7 @@ class RTDETR_detect(io.ComfyNode): return io.Schema( node_id="RTDETR_detect", display_name="RT-DETR Detect", - category="detection/", + category="detection", search_aliases=["bbox", "bounding box", "object detection", "coco"], inputs=[ io.Model.Input("model", display_name="model"), @@ -71,7 +71,7 @@ class DrawBBoxes(io.ComfyNode): return io.Schema( node_id="DrawBBoxes", display_name="Draw BBoxes", - category="detection/", + category="detection", search_aliases=["bbox", "bounding box", "object detection", "rt_detr", "visualize detections", "coco"], inputs=[ io.Image.Input("image", optional=True), diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index d9c47851c..9dbf1b6f9 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -113,7 +113,7 @@ class SelfAttentionGuidance(io.ComfyNode): return io.Schema( node_id="SelfAttentionGuidance", display_name="Self-Attention Guidance", - category="_for_testing", + category="experimental", inputs=[ io.Model.Input("model"), io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01), diff --git a/comfy_extras/nodes_sam3.py b/comfy_extras/nodes_sam3.py index c460506bf..4ea9221e9 100644 --- a/comfy_extras/nodes_sam3.py +++ b/comfy_extras/nodes_sam3.py @@ -93,7 +93,7 @@ class SAM3_Detect(io.ComfyNode): return io.Schema( node_id="SAM3_Detect", display_name="SAM3 Detect", - category="detection/", + category="detection", search_aliases=["sam3", "segment anything", "open vocabulary", "text detection", "segment"], inputs=[ io.Model.Input("model", display_name="model"), @@ -265,7 +265,7 @@ class SAM3_VideoTrack(io.ComfyNode): return io.Schema( node_id="SAM3_VideoTrack", display_name="SAM3 Video Track", - category="detection/", + category="detection", search_aliases=["sam3", "video", "track", "propagate"], inputs=[ io.Image.Input("images", display_name="images", tooltip="Video frames as batched images"), @@ -320,7 +320,7 @@ class SAM3_TrackPreview(io.ComfyNode): return io.Schema( node_id="SAM3_TrackPreview", display_name="SAM3 Track Preview", - category="detection/", + category="detection", inputs=[ SAM3TrackData.Input("track_data", display_name="track_data"), io.Image.Input("images", display_name="images", optional=True), @@ -478,7 +478,7 @@ class SAM3_TrackToMask(io.ComfyNode): return io.Schema( node_id="SAM3_TrackToMask", display_name="SAM3 Track to Mask", - category="detection/", + category="detection", inputs=[ SAM3TrackData.Input("track_data", display_name="track_data"), io.String.Input("object_indices", display_name="object_indices", default="", diff --git a/comfy_extras/nodes_stable_cascade.py 
b/comfy_extras/nodes_stable_cascade.py index 8c1aebca9..0dc6c9fcd 100644 --- a/comfy_extras/nodes_stable_cascade.py +++ b/comfy_extras/nodes_stable_cascade.py @@ -119,7 +119,7 @@ class StableCascade_SuperResolutionControlnet(io.ComfyNode): def define_schema(cls): return io.Schema( node_id="StableCascade_SuperResolutionControlnet", - category="_for_testing/stable_cascade", + category="experimental/stable_cascade", is_experimental=True, inputs=[ io.Image.Input("image"), diff --git a/comfy_extras/nodes_textgen.py b/comfy_extras/nodes_textgen.py index 1661a1011..d52faf815 100644 --- a/comfy_extras/nodes_textgen.py +++ b/comfy_extras/nodes_textgen.py @@ -26,7 +26,8 @@ class TextGenerate(io.ComfyNode): return io.Schema( node_id="TextGenerate", - category="textgen", + display_name="Generate Text", + category="text", search_aliases=["LLM", "gemma"], inputs=[ io.Clip.Input("clip"), @@ -157,6 +158,7 @@ class TextGenerateLTX2Prompt(TextGenerate): parent_schema = super().define_schema() return io.Schema( node_id="TextGenerateLTX2Prompt", + display_name="Generate LTX2 Prompt", category=parent_schema.category, inputs=parent_schema.inputs, outputs=parent_schema.outputs, diff --git a/comfy_extras/nodes_torch_compile.py b/comfy_extras/nodes_torch_compile.py index c9e2e0026..d4506b1a9 100644 --- a/comfy_extras/nodes_torch_compile.py +++ b/comfy_extras/nodes_torch_compile.py @@ -10,7 +10,7 @@ class TorchCompileModel(io.ComfyNode): def define_schema(cls) -> io.Schema: return io.Schema( node_id="TorchCompileModel", - category="_for_testing", + category="experimental", inputs=[ io.Model.Input("model"), io.Combo.Input( diff --git a/comfy_extras/nodes_train.py b/comfy_extras/nodes_train.py index 0616dfc2d..e9871369b 100644 --- a/comfy_extras/nodes_train.py +++ b/comfy_extras/nodes_train.py @@ -1361,7 +1361,7 @@ class SaveLoRA(io.ComfyNode): node_id="SaveLoRA", search_aliases=["export lora"], display_name="Save LoRA Weights", - category="loaders", + category="advanced/model_merging", is_experimental=True, is_output_node=True, inputs=[ diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py index bf98e6b82..0f3881a24 100644 --- a/comfy_extras/nodes_video_model.py +++ b/comfy_extras/nodes_video_model.py @@ -15,7 +15,7 @@ class ImageOnlyCheckpointLoader: RETURN_TYPES = ("MODEL", "CLIP_VISION", "VAE") FUNCTION = "load_checkpoint" - CATEGORY = "loaders/video_models" + CATEGORY = "loaders" def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name) diff --git a/custom_nodes/websocket_image_save.py b/custom_nodes/websocket_image_save.py index 15f87f9f5..6a8646d0e 100644 --- a/custom_nodes/websocket_image_save.py +++ b/custom_nodes/websocket_image_save.py @@ -22,7 +22,7 @@ class SaveImageWebsocket: OUTPUT_NODE = True - CATEGORY = "api/image" + CATEGORY = "image" def save_images(self, images): pbar = comfy.utils.ProgressBar(images.shape[0]) @@ -42,3 +42,7 @@ class SaveImageWebsocket: NODE_CLASS_MAPPINGS = { "SaveImageWebsocket": SaveImageWebsocket, } + +NODE_DISPLAY_NAME_MAPPINGS = { + "SaveImageWebsocket": "Save Image (Websocket)", +} \ No newline at end of file diff --git a/nodes.py b/nodes.py index ad0cbc675..ae9e70cb9 100644 --- a/nodes.py +++ b/nodes.py @@ -330,7 +330,7 @@ class VAEDecodeTiled: RETURN_TYPES = ("IMAGE",) FUNCTION = "decode" - CATEGORY = "_for_testing" + CATEGORY = "experimental" def decode(self, vae, samples, tile_size, overlap=64, temporal_size=64, temporal_overlap=8): if tile_size < 
overlap * 4: @@ -377,7 +377,7 @@ class VAEEncodeTiled: RETURN_TYPES = ("LATENT",) FUNCTION = "encode" - CATEGORY = "_for_testing" + CATEGORY = "experimental" def encode(self, vae, pixels, tile_size, overlap, temporal_size=64, temporal_overlap=8): t = vae.encode_tiled(pixels, tile_x=tile_size, tile_y=tile_size, overlap=overlap, tile_t=temporal_size, overlap_t=temporal_overlap) @@ -493,7 +493,7 @@ class SaveLatent: OUTPUT_NODE = True - CATEGORY = "_for_testing" + CATEGORY = "experimental" def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) @@ -538,7 +538,7 @@ class LoadLatent: files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f)) and f.endswith(".latent")] return {"required": {"latent": [sorted(files), ]}, } - CATEGORY = "_for_testing" + CATEGORY = "experimental" RETURN_TYPES = ("LATENT", ) FUNCTION = "load" @@ -1443,7 +1443,7 @@ class LatentBlend: RETURN_TYPES = ("LATENT",) FUNCTION = "blend" - CATEGORY = "_for_testing" + CATEGORY = "experimental" def blend(self, samples1, samples2, blend_factor:float, blend_mode: str="normal"): @@ -2092,6 +2092,8 @@ NODE_DISPLAY_NAME_MAPPINGS = { "StyleModelLoader": "Load Style Model", "CLIPVisionLoader": "Load CLIP Vision", "UNETLoader": "Load Diffusion Model", + "unCLIPCheckpointLoader": "Load unCLIP Checkpoint", + "GLIGENLoader": "Load GLIGEN Model", # Conditioning "CLIPVisionEncode": "CLIP Vision Encode", "StyleModelApply": "Apply Style Model", @@ -2140,7 +2142,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "ImageSharpen": "Sharpen Image", "ImageScaleToTotalPixels": "Scale Image to Total Pixels", "GetImageSize": "Get Image Size", - # _for_testing + # experimental "VAEDecodeTiled": "VAE Decode (Tiled)", "VAEEncodeTiled": "VAE Encode (Tiled)", } diff --git a/tests/execution/testing_nodes/testing-pack/api_test_nodes.py b/tests/execution/testing_nodes/testing-pack/api_test_nodes.py index b2eaae05e..70c2a9e95 100644 --- a/tests/execution/testing_nodes/testing-pack/api_test_nodes.py +++ b/tests/execution/testing_nodes/testing-pack/api_test_nodes.py @@ -21,7 +21,7 @@ class TestAsyncProgressUpdate(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "execute" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" async def execute(self, value, sleep_seconds): start = time.time() @@ -51,7 +51,7 @@ class TestSyncProgressUpdate(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "execute" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" def execute(self, value, sleep_seconds): start = time.time() diff --git a/tests/execution/testing_nodes/testing-pack/async_test_nodes.py b/tests/execution/testing_nodes/testing-pack/async_test_nodes.py index 547eea6f4..589dabf17 100644 --- a/tests/execution/testing_nodes/testing-pack/async_test_nodes.py +++ b/tests/execution/testing_nodes/testing-pack/async_test_nodes.py @@ -21,7 +21,7 @@ class TestAsyncValidation(ComfyNodeABC): RETURN_TYPES = ("IMAGE",) FUNCTION = "process" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" @classmethod async def VALIDATE_INPUTS(cls, value, threshold): @@ -53,7 +53,7 @@ class TestAsyncError(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "error_execution" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" async def error_execution(self, value, error_after): await asyncio.sleep(error_after) @@ -74,7 +74,7 @@ class 
TestAsyncValidationError(ComfyNodeABC): RETURN_TYPES = ("IMAGE",) FUNCTION = "process" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" @classmethod async def VALIDATE_INPUTS(cls, value, max_value): @@ -105,7 +105,7 @@ class TestAsyncTimeout(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "timeout_execution" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" async def timeout_execution(self, value, timeout, operation_time): try: @@ -129,7 +129,7 @@ class TestSyncError(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "sync_error" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" def sync_error(self, value): raise RuntimeError("Intentional sync execution error for testing") @@ -150,7 +150,7 @@ class TestAsyncLazyCheck(ComfyNodeABC): RETURN_TYPES = ("IMAGE",) FUNCTION = "process" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" async def check_lazy_status(self, condition, input1, input2): # Simulate async checking (e.g., querying remote service) @@ -184,7 +184,7 @@ class TestDynamicAsyncGeneration(ComfyNodeABC): RETURN_TYPES = ("IMAGE",) FUNCTION = "generate_async_workflow" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" def generate_async_workflow(self, image1, image2, num_async_nodes, sleep_duration): g = GraphBuilder() @@ -229,7 +229,7 @@ class TestAsyncResourceUser(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "use_resource" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" async def use_resource(self, value, resource_id, duration): # Check if resource is already in use @@ -265,7 +265,7 @@ class TestAsyncBatchProcessing(ComfyNodeABC): RETURN_TYPES = ("IMAGE",) FUNCTION = "process_batch" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" async def process_batch(self, images, process_time_per_item, unique_id): batch_size = images.shape[0] @@ -305,7 +305,7 @@ class TestAsyncConcurrentLimit(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "limited_execution" - CATEGORY = "_for_testing/async" + CATEGORY = "experimental/async" async def limited_execution(self, value, duration, node_id): async with self._semaphore: diff --git a/tests/execution/testing_nodes/testing-pack/specific_tests.py b/tests/execution/testing_nodes/testing-pack/specific_tests.py index 4f8f01ae4..2eb5d520e 100644 --- a/tests/execution/testing_nodes/testing-pack/specific_tests.py +++ b/tests/execution/testing_nodes/testing-pack/specific_tests.py @@ -409,7 +409,7 @@ class TestSleep(ComfyNodeABC): RETURN_TYPES = (IO.ANY,) FUNCTION = "sleep" - CATEGORY = "_for_testing" + CATEGORY = "experimental" async def sleep(self, value, seconds, unique_id): pbar = ProgressBar(seconds, node_id=unique_id) @@ -440,7 +440,7 @@ class TestParallelSleep(ComfyNodeABC): } RETURN_TYPES = ("IMAGE",) FUNCTION = "parallel_sleep" - CATEGORY = "_for_testing" + CATEGORY = "experimental" OUTPUT_NODE = True def parallel_sleep(self, image1, image2, image3, sleep1, sleep2, sleep3, unique_id): @@ -474,7 +474,7 @@ class TestOutputNodeWithSocketOutput: } RETURN_TYPES = ("IMAGE",) FUNCTION = "process" - CATEGORY = "_for_testing" + CATEGORY = "experimental" OUTPUT_NODE = True def process(self, image, value): From 56c74094c7c2ccbcf23f2aca1e4000199934da13 Mon Sep 17 00:00:00 2001 From: Alexander Piskun <13381981+bigcat88@users.noreply.github.com> Date: Fri, 8 May 2026 09:39:13 +0300 Subject: [PATCH 23/25] [Partner Nodes] use "adaptive" aspect ratio for SD2 nodes (#13800) Signed-off-by: bigcat88 --- 
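Note: the shared _seedance2_text_inputs()/_seedance2_reference_inputs()
builders gain a default_ratio parameter (default "16:9", preserving the
previous behavior for callers that do not pass it); the first/last-frame
and reference nodes opt in to "adaptive", presumably so the output aspect
ratio can follow the input image rather than being forced to a fixed ratio.
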
comfy_api_nodes/nodes_bytedance.py | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/comfy_api_nodes/nodes_bytedance.py b/comfy_api_nodes/nodes_bytedance.py index 2f241a775..5f74f4a14 100644 --- a/comfy_api_nodes/nodes_bytedance.py +++ b/comfy_api_nodes/nodes_bytedance.py @@ -1271,7 +1271,7 @@ PRICE_BADGE_VIDEO = IO.PriceBadge( ) -def _seedance2_text_inputs(resolutions: list[str]): +def _seedance2_text_inputs(resolutions: list[str], default_ratio: str = "16:9"): return [ IO.String.Input( "prompt", @@ -1287,6 +1287,7 @@ def _seedance2_text_inputs(resolutions: list[str]): IO.Combo.Input( "ratio", options=["16:9", "4:3", "1:1", "3:4", "9:16", "21:9", "adaptive"], + default=default_ratio, tooltip="Aspect ratio of the output video.", ), IO.Int.Input( @@ -1420,8 +1421,14 @@ class ByteDance2FirstLastFrameNode(IO.ComfyNode): IO.DynamicCombo.Input( "model", options=[ - IO.DynamicCombo.Option("Seedance 2.0", _seedance2_text_inputs(["480p", "720p", "1080p"])), - IO.DynamicCombo.Option("Seedance 2.0 Fast", _seedance2_text_inputs(["480p", "720p"])), + IO.DynamicCombo.Option( + "Seedance 2.0", + _seedance2_text_inputs(["480p", "720p", "1080p"], default_ratio="adaptive"), + ), + IO.DynamicCombo.Option( + "Seedance 2.0 Fast", + _seedance2_text_inputs(["480p", "720p"], default_ratio="adaptive"), + ), ], tooltip="Seedance 2.0 for maximum quality; Seedance 2.0 Fast for speed optimization.", ), @@ -1588,9 +1595,9 @@ class ByteDance2FirstLastFrameNode(IO.ComfyNode): return IO.NodeOutput(await download_url_to_video_output(response.content.video_url)) -def _seedance2_reference_inputs(resolutions: list[str]): +def _seedance2_reference_inputs(resolutions: list[str], default_ratio: str = "16:9"): return [ - *_seedance2_text_inputs(resolutions), + *_seedance2_text_inputs(resolutions, default_ratio=default_ratio), IO.Autogrow.Input( "reference_images", template=IO.Autogrow.TemplateNames( @@ -1668,8 +1675,14 @@ class ByteDance2ReferenceNode(IO.ComfyNode): IO.DynamicCombo.Input( "model", options=[ - IO.DynamicCombo.Option("Seedance 2.0", _seedance2_reference_inputs(["480p", "720p", "1080p"])), - IO.DynamicCombo.Option("Seedance 2.0 Fast", _seedance2_reference_inputs(["480p", "720p"])), + IO.DynamicCombo.Option( + "Seedance 2.0", + _seedance2_reference_inputs(["480p", "720p", "1080p"], default_ratio="adaptive"), + ), + IO.DynamicCombo.Option( + "Seedance 2.0 Fast", + _seedance2_reference_inputs(["480p", "720p"], default_ratio="adaptive"), + ), ], tooltip="Seedance 2.0 for maximum quality; Seedance 2.0 Fast for speed optimization.", ), From bac6fc35fbf3fb2a6fc7e54fce17203215bcfff5 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Fri, 8 May 2026 11:14:45 +0200 Subject: [PATCH 24/25] Fix typos (#10986) --- comfy/hooks.py | 2 +- comfy/ldm/modules/diffusionmodules/util.py | 2 +- comfy_extras/nodes_flux.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy/hooks.py b/comfy/hooks.py index 1a76c7ba4..5458fc3d8 100644 --- a/comfy/hooks.py +++ b/comfy/hooks.py @@ -93,7 +93,7 @@ class Hook: self.hook_scope = hook_scope '''Scope of where this hook should apply in terms of the conds used in sampling run.''' self.custom_should_register = default_should_register - '''Can be overriden with a compatible function to decide if this hook should be registered without the need to override .should_register''' + '''Can be overridden with a compatible function to decide if this hook should be registered without the need to override 
.should_register''' @property def strength(self): diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index 233011dc9..aed5c149c 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -140,7 +140,7 @@ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): alphas = alphacums[ddim_timesteps] alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) - # according the the formula provided in https://arxiv.org/abs/2010.02502 + # according to the formula provided in https://arxiv.org/abs/2010.02502 sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) if verbose: logging.info(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') diff --git a/comfy_extras/nodes_flux.py b/comfy_extras/nodes_flux.py index 3a23c7d04..5e04a5f77 100644 --- a/comfy_extras/nodes_flux.py +++ b/comfy_extras/nodes_flux.py @@ -102,7 +102,7 @@ class FluxDisableGuidance(io.ComfyNode): append = execute # TODO: remove -PREFERED_KONTEXT_RESOLUTIONS = [ +PREFERRED_KONTEXT_RESOLUTIONS = [ (672, 1568), (688, 1504), (720, 1456), @@ -143,7 +143,7 @@ class FluxKontextImageScale(io.ComfyNode): width = image.shape[2] height = image.shape[1] aspect_ratio = width / height - _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERED_KONTEXT_RESOLUTIONS) + _, width, height = min((abs(aspect_ratio - w / h), w, h) for w, h in PREFERRED_KONTEXT_RESOLUTIONS) image = comfy.utils.common_upscale(image.movedim(-1, 1), width, height, "lanczos", "center").movedim(1, -1) return io.NodeOutput(image) From d3c18c163665a6f94e7dc56823aabcb93ebf7e5e Mon Sep 17 00:00:00 2001 From: "Yousef R. Gamaleldin" <81116377+yousef-rafat@users.noreply.github.com> Date: Fri, 8 May 2026 12:59:24 +0300 Subject: [PATCH 25/25] Add support for BiRefNet background remove model (CORE-46) (#12747) --- comfy/background_removal/birefnet.json | 7 + comfy/background_removal/birefnet.py | 689 ++++++++++++++++++ comfy/bg_removal_model.py | 78 ++ comfy/ops.py | 22 + comfy_api/latest/_io.py | 7 + comfy_extras/nodes_bg_removal.py | 60 ++ comfy_extras/nodes_mask.py | 27 +- folder_paths.py | 2 + .../put_background_removal_models_here | 0 nodes.py | 1 + 10 files changed, 887 insertions(+), 6 deletions(-) create mode 100644 comfy/background_removal/birefnet.json create mode 100644 comfy/background_removal/birefnet.py create mode 100644 comfy/bg_removal_model.py create mode 100644 comfy_extras/nodes_bg_removal.py create mode 100644 models/background_removal/put_background_removal_models_here diff --git a/comfy/background_removal/birefnet.json b/comfy/background_removal/birefnet.json new file mode 100644 index 000000000..f0960af39 --- /dev/null +++ b/comfy/background_removal/birefnet.json @@ -0,0 +1,7 @@ +{ + "model_type": "birefnet", + "image_std": [1.0, 1.0, 1.0], + "image_mean": [0.0, 0.0, 0.0], + "image_size": 1024, + "resize_to_original": true +} diff --git a/comfy/background_removal/birefnet.py b/comfy/background_removal/birefnet.py new file mode 100644 index 000000000..df54b2b90 --- /dev/null +++ b/comfy/background_removal/birefnet.py @@ -0,0 +1,689 @@ +import torch +import comfy.ops +import numpy as np +import torch.nn as nn +from functools import partial +import torch.nn.functional as F +from torchvision.ops import deform_conv2d +from comfy.ldm.modules.attention import optimized_attention_for_device + +CXT = [3072, 1536, 768, 384][1:][::-1][-3:] + +class 
Attention(nn.Module): + def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, device=None, dtype=None, operations=None): + super().__init__() + + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.q = operations.Linear(dim, dim, bias=qkv_bias, device=device, dtype=dtype) + self.kv = operations.Linear(dim, dim * 2, bias=qkv_bias, device=device, dtype=dtype) + self.proj = operations.Linear(dim, dim, device=device, dtype=dtype) + + def forward(self, x): + B, N, C = x.shape + optimized_attention = optimized_attention_for_device(x.device, mask=False, small_input=True) + q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + x = optimized_attention( + q, k, v, heads=self.num_heads, skip_output_reshape=True, skip_reshape=True + ).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + + return x + +class Mlp(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, device=None, dtype=None, operations=None): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = operations.Linear(in_features, hidden_features, device=device, dtype=dtype) + self.act = nn.GELU() + self.fc2 = operations.Linear(hidden_features, out_features, device=device, dtype=dtype) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +def window_partition(x, window_size): + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, device=None, dtype=None, operations=None): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads, device=device, dtype=dtype)) + + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing='ij')) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = operations.Linear(dim, dim * 3, bias=qkv_bias, device=device, dtype=dtype) + self.proj = operations.Linear(dim, dim, device=device, dtype=dtype) + self.softmax = 
nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.long().view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + return x + + +class SwinTransformerBlock(nn.Module): + def __init__(self, dim, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, + norm_layer=nn.LayerNorm, device=None, dtype=None, operations=None): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + + self.norm1 = norm_layer(dim, device=device, dtype=dtype) + self.attn = WindowAttention( + dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, device=device, dtype=dtype, operations=operations) + + self.norm2 = norm_layer(dim, device=device, dtype=dtype) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, device=device, dtype=dtype, operations=operations) + + self.H = None + self.W = None + + def forward(self, x, mask_matrix): + B, L, C = x.shape + H, W = self.H, self.W + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + + x_windows = window_partition(shifted_x, self.window_size) + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) + + attn_windows = self.attn(x_windows, mask=attn_mask) + + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = shortcut + x + x = x + self.mlp(self.norm2(x)) + + return x + + +class PatchMerging(nn.Module): + def __init__(self, dim, device=None, dtype=None, operations=None): + super().__init__() + self.dim = dim + self.reduction = operations.Linear(4 * dim, 2 * dim, bias=False, device=device, dtype=dtype) + self.norm = operations.LayerNorm(4 * dim, device=device, dtype=dtype) + + def forward(self, x, H, W): + B, L, C = x.shape + x = x.view(B, H, W, C) + + # padding + pad_input = (H % 2 == 1) or (W % 2 == 1) + if pad_input: + x = 
F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + def __init__(self, + dim, + depth, + num_heads, + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + norm_layer=nn.LayerNorm, + downsample=None, + device=None, dtype=None, operations=None): + super().__init__() + self.window_size = window_size + self.shift_size = window_size // 2 + self.depth = depth + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + norm_layer=norm_layer, + device=device, dtype=dtype, operations=operations) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(dim=dim, device=device, dtype=dtype, operations=operations) + else: + self.downsample = None + + def forward(self, x, H, W): + Hp = int(np.ceil(H / self.window_size)) * self.window_size + Wp = int(np.ceil(W / self.window_size)) * self.window_size + img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + for blk in self.blocks: + blk.H, blk.W = H, W + x = blk(x, attn_mask) + if self.downsample is not None: + x_down = self.downsample(x, H, W) + Wh, Ww = (H + 1) // 2, (W + 1) // 2 + return x, H, W, x_down, Wh, Ww + else: + return x, H, W, x, H, W + + +class PatchEmbed(nn.Module): + def __init__(self, patch_size=4, in_channels=3, embed_dim=96, norm_layer=None, device=None, dtype=None, operations=None): + super().__init__() + patch_size = (patch_size, patch_size) + self.patch_size = patch_size + + self.in_channels = in_channels + self.embed_dim = embed_dim + + self.proj = operations.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size, device=device, dtype=dtype) + if norm_layer is not None: + self.norm = norm_layer(embed_dim, device=device, dtype=dtype) + else: + self.norm = None + + def forward(self, x): + _, _, H, W = x.size() + if W % self.patch_size[1] != 0: + x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) + if H % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) + + x = self.proj(x) # B C Wh Ww + if self.norm is not None: + Wh, Ww = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) + + return x + + +class SwinTransformer(nn.Module): + def __init__(self, + pretrain_img_size=224, + patch_size=4, + in_channels=3, + embed_dim=96, + depths=[2, 
2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + patch_norm=True, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + device=None, dtype=None, operations=None): + super().__init__() + + norm_layer = partial(operations.LayerNorm, device=device, dtype=dtype) + self.pretrain_img_size = pretrain_img_size + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.patch_norm = patch_norm + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + self.patch_embed = PatchEmbed( + patch_size=patch_size, in_channels=in_channels, embed_dim=embed_dim, + device=device, dtype=dtype, operations=operations, + norm_layer=norm_layer if self.patch_norm else None) + + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + device=device, dtype=dtype, operations=operations) + self.layers.append(layer) + + num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + self.num_features = num_features + + for i_layer in out_indices: + layer = norm_layer(num_features[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + + def forward(self, x): + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + + outs = [] + x = x.flatten(2).transpose(1, 2) + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs.append(out) + + return tuple(outs) + +class DeformableConv2d(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False, device=None, dtype=None, operations=None): + + super(DeformableConv2d, self).__init__() + + kernel_size = kernel_size if type(kernel_size) is tuple else (kernel_size, kernel_size) + self.stride = stride if type(stride) is tuple else (stride, stride) + self.padding = padding + + self.offset_conv = operations.Conv2d(in_channels, + 2 * kernel_size[0] * kernel_size[1], + kernel_size=kernel_size, + stride=stride, + padding=self.padding, + bias=True, device=device, dtype=dtype) + + self.modulator_conv = operations.Conv2d(in_channels, + 1 * kernel_size[0] * kernel_size[1], + kernel_size=kernel_size, + stride=stride, + padding=self.padding, + bias=True, device=device, dtype=dtype) + + self.regular_conv = operations.Conv2d(in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=self.padding, + bias=bias, device=device, dtype=dtype) + + def forward(self, x): + offset = self.offset_conv(x) + modulator = 2. 
* torch.sigmoid(self.modulator_conv(x)) + weight, bias, offload_info = comfy.ops.cast_bias_weight(self.regular_conv, x, offloadable=True) + + x = deform_conv2d( + input=x, + offset=offset, + weight=weight, + bias=None, + padding=self.padding, + mask=modulator, + stride=self.stride, + ) + comfy.ops.uncast_bias_weight(self.regular_conv, weight, bias, offload_info) + return x + +class BasicDecBlk(nn.Module): + def __init__(self, in_channels=64, out_channels=64, inter_channels=64, device=None, dtype=None, operations=None): + super(BasicDecBlk, self).__init__() + inter_channels = 64 + self.conv_in = operations.Conv2d(in_channels, inter_channels, 3, 1, padding=1, device=device, dtype=dtype) + self.relu_in = nn.ReLU(inplace=True) + self.dec_att = ASPPDeformable(in_channels=inter_channels, device=device, dtype=dtype, operations=operations) + self.conv_out = operations.Conv2d(inter_channels, out_channels, 3, 1, padding=1, device=device, dtype=dtype) + self.bn_in = operations.BatchNorm2d(inter_channels, device=device, dtype=dtype) + self.bn_out = operations.BatchNorm2d(out_channels, device=device, dtype=dtype) + + def forward(self, x): + x = self.conv_in(x) + x = self.bn_in(x) + x = self.relu_in(x) + x = self.dec_att(x) + x = self.conv_out(x) + x = self.bn_out(x) + return x + + +class BasicLatBlk(nn.Module): + def __init__(self, in_channels=64, out_channels=64, device=None, dtype=None, operations=None): + super(BasicLatBlk, self).__init__() + self.conv = operations.Conv2d(in_channels, out_channels, 1, 1, 0, device=device, dtype=dtype) + + def forward(self, x): + x = self.conv(x) + return x + + +class _ASPPModuleDeformable(nn.Module): + def __init__(self, in_channels, planes, kernel_size, padding, device, dtype, operations): + super(_ASPPModuleDeformable, self).__init__() + self.atrous_conv = DeformableConv2d(in_channels, planes, kernel_size=kernel_size, + stride=1, padding=padding, bias=False, device=device, dtype=dtype, operations=operations) + self.bn = operations.BatchNorm2d(planes, device=device, dtype=dtype) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.atrous_conv(x) + x = self.bn(x) + + return self.relu(x) + + +class ASPPDeformable(nn.Module): + def __init__(self, in_channels, out_channels=None, parallel_block_sizes=[1, 3, 7], device=None, dtype=None, operations=None): + super(ASPPDeformable, self).__init__() + self.down_scale = 1 + if out_channels is None: + out_channels = in_channels + self.in_channelster = 256 // self.down_scale + + self.aspp1 = _ASPPModuleDeformable(in_channels, self.in_channelster, 1, padding=0, device=device, dtype=dtype, operations=operations) + self.aspp_deforms = nn.ModuleList([ + _ASPPModuleDeformable(in_channels, self.in_channelster, conv_size, padding=int(conv_size//2), device=device, dtype=dtype, operations=operations) + for conv_size in parallel_block_sizes + ]) + + self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), + operations.Conv2d(in_channels, self.in_channelster, 1, stride=1, bias=False, device=device, dtype=dtype), + operations.BatchNorm2d(self.in_channelster, device=device, dtype=dtype), + nn.ReLU(inplace=True)) + self.conv1 = operations.Conv2d(self.in_channelster * (2 + len(self.aspp_deforms)), out_channels, 1, bias=False, device=device, dtype=dtype) + self.bn1 = operations.BatchNorm2d(out_channels, device=device, dtype=dtype) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x1 = self.aspp1(x) + x_aspp_deforms = [aspp_deform(x) for aspp_deform in self.aspp_deforms] + x5 = self.global_avg_pool(x) 
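+        # Broadcast the 1x1 global-pooled context back to the spatial size of
+        # the other ASPP branches so it can be concatenated with them below.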
+        x5 = F.interpolate(x5, size=x1.size()[2:], mode='bilinear', align_corners=True)
+        x = torch.cat((x1, *x_aspp_deforms, x5), dim=1)
+
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+
+        return x
+
+class BiRefNet(nn.Module):
+    def __init__(self, config=None, dtype=None, device=None, operations=None):
+        super(BiRefNet, self).__init__()
+        self.bb = SwinTransformer(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12, device=device, dtype=dtype, operations=operations)
+
+        channels = [1536, 768, 384, 192]
+        channels = [c * 2 for c in channels]
+        self.cxt = channels[1:][::-1][-3:]
+        self.squeeze_module = nn.Sequential(*[
+            BasicDecBlk(channels[0]+sum(self.cxt), channels[0], device=device, dtype=dtype, operations=operations)
+            for _ in range(1)
+        ])
+
+        self.decoder = Decoder(channels, device=device, dtype=dtype, operations=operations)
+
+    def forward_enc(self, x):
+        x1, x2, x3, x4 = self.bb(x)
+        B, C, H, W = x.shape
+        x1_, x2_, x3_, x4_ = self.bb(F.interpolate(x, size=(H//2, W//2), mode='bilinear', align_corners=True))
+        x1 = torch.cat([x1, F.interpolate(x1_, size=x1.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+        x2 = torch.cat([x2, F.interpolate(x2_, size=x2.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+        x3 = torch.cat([x3, F.interpolate(x3_, size=x3.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+        x4 = torch.cat([x4, F.interpolate(x4_, size=x4.shape[2:], mode='bilinear', align_corners=True)], dim=1)
+        x4 = torch.cat(
+            (
+                *[
+                    F.interpolate(x1, size=x4.shape[2:], mode='bilinear', align_corners=True),
+                    F.interpolate(x2, size=x4.shape[2:], mode='bilinear', align_corners=True),
+                    F.interpolate(x3, size=x4.shape[2:], mode='bilinear', align_corners=True),
+                ][-len(self.cxt):],
+                x4
+            ),
+            dim=1
+        )
+        return (x1, x2, x3, x4)
+
+    def forward_ori(self, x):
+        (x1, x2, x3, x4) = self.forward_enc(x)
+        x4 = self.squeeze_module(x4)
+        features = [x, x1, x2, x3, x4]
+        scaled_preds = self.decoder(features)
+        return scaled_preds
+
+    def forward(self, pixel_values, intermediate_output=None):
+        scaled_preds = self.forward_ori(pixel_values)
+        return scaled_preds
+
+
+class Decoder(nn.Module):
+    def __init__(self, channels, device, dtype, operations):
+        super(Decoder, self).__init__()
+        # factory kwargs
+        fk = {"device":device, "dtype":dtype, "operations":operations}
+        DecoderBlock = partial(BasicDecBlk, **fk)
+        LateralBlock = partial(BasicLatBlk, **fk)
+        DBlock = partial(SimpleConvs, **fk)
+
+        self.split = True
+        N_dec_ipt = 64
+        ic = 64
+        ipt_cha_opt = 1
+        self.ipt_blk5 = DBlock(2**10*3 if self.split else 3, [N_dec_ipt, channels[0]//8][ipt_cha_opt], inter_channels=ic)
+        self.ipt_blk4 = DBlock(2**8*3 if self.split else 3, [N_dec_ipt, channels[0]//8][ipt_cha_opt], inter_channels=ic)
+        self.ipt_blk3 = DBlock(2**6*3 if self.split else 3, [N_dec_ipt, channels[1]//8][ipt_cha_opt], inter_channels=ic)
+        self.ipt_blk2 = DBlock(2**4*3 if self.split else 3, [N_dec_ipt, channels[2]//8][ipt_cha_opt], inter_channels=ic)
+        self.ipt_blk1 = DBlock(2**0*3 if self.split else 3, [N_dec_ipt, channels[3]//8][ipt_cha_opt], inter_channels=ic)
+
+        self.decoder_block4 = DecoderBlock(channels[0]+([N_dec_ipt, channels[0]//8][ipt_cha_opt]), channels[1])
+        self.decoder_block3 = DecoderBlock(channels[1]+([N_dec_ipt, channels[0]//8][ipt_cha_opt]), channels[2])
+        self.decoder_block2 = DecoderBlock(channels[2]+([N_dec_ipt, channels[1]//8][ipt_cha_opt]), channels[3])
+        self.decoder_block1 = DecoderBlock(channels[3]+([N_dec_ipt, channels[2]//8][ipt_cha_opt]), channels[3]//2)
+
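+        # the remaining layers are plain convs, so rebuild the factory kwargs
+        # without the operations entry; operations.Conv2d is called directly
+        # with **fk below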
= {"device":device, "dtype":dtype} + + self.conv_out1 = nn.Sequential(operations.Conv2d(channels[3]//2+([N_dec_ipt, channels[3]//8][ipt_cha_opt]), 1, 1, 1, 0, **fk)) + + self.lateral_block4 = LateralBlock(channels[1], channels[1]) + self.lateral_block3 = LateralBlock(channels[2], channels[2]) + self.lateral_block2 = LateralBlock(channels[3], channels[3]) + + self.conv_ms_spvn_4 = operations.Conv2d(channels[1], 1, 1, 1, 0, **fk) + self.conv_ms_spvn_3 = operations.Conv2d(channels[2], 1, 1, 1, 0, **fk) + self.conv_ms_spvn_2 = operations.Conv2d(channels[3], 1, 1, 1, 0, **fk) + + _N = 16 + + self.gdt_convs_4 = nn.Sequential(operations.Conv2d(channels[0] // 2, _N, 3, 1, 1, **fk), operations.BatchNorm2d(_N, **fk), nn.ReLU(inplace=True)) + self.gdt_convs_3 = nn.Sequential(operations.Conv2d(channels[1] // 2, _N, 3, 1, 1, **fk), operations.BatchNorm2d(_N, **fk), nn.ReLU(inplace=True)) + self.gdt_convs_2 = nn.Sequential(operations.Conv2d(channels[2] // 2, _N, 3, 1, 1, **fk), operations.BatchNorm2d(_N, **fk), nn.ReLU(inplace=True)) + + [setattr(self, f"gdt_convs_pred_{i}", nn.Sequential(operations.Conv2d(_N, 1, 1, 1, 0, **fk))) for i in range(2, 5)] + [setattr(self, f"gdt_convs_attn_{i}", nn.Sequential(operations.Conv2d(_N, 1, 1, 1, 0, **fk))) for i in range(2, 5)] + + def get_patches_batch(self, x, p): + _size_h, _size_w = p.shape[2:] + patches_batch = [] + for idx in range(x.shape[0]): + columns_x = torch.split(x[idx], split_size_or_sections=_size_w, dim=-1) + patches_x = [] + for column_x in columns_x: + patches_x += [p.unsqueeze(0) for p in torch.split(column_x, split_size_or_sections=_size_h, dim=-2)] + patch_sample = torch.cat(patches_x, dim=1) + patches_batch.append(patch_sample) + return torch.cat(patches_batch, dim=0) + + def forward(self, features): + x, x1, x2, x3, x4 = features + + patches_batch = self.get_patches_batch(x, x4) if self.split else x + x4 = torch.cat((x4, self.ipt_blk5(F.interpolate(patches_batch, size=x4.shape[2:], mode='bilinear', align_corners=True))), 1) + p4 = self.decoder_block4(x4) + p4_gdt = self.gdt_convs_4(p4) + gdt_attn_4 = self.gdt_convs_attn_4(p4_gdt).sigmoid() + p4 = p4 * gdt_attn_4 + _p4 = F.interpolate(p4, size=x3.shape[2:], mode='bilinear', align_corners=True) + _p3 = _p4 + self.lateral_block4(x3) + + patches_batch = self.get_patches_batch(x, _p3) if self.split else x + _p3 = torch.cat((_p3, self.ipt_blk4(F.interpolate(patches_batch, size=x3.shape[2:], mode='bilinear', align_corners=True))), 1) + p3 = self.decoder_block3(_p3) + + p3_gdt = self.gdt_convs_3(p3) + gdt_attn_3 = self.gdt_convs_attn_3(p3_gdt).sigmoid() + p3 = p3 * gdt_attn_3 + _p3 = F.interpolate(p3, size=x2.shape[2:], mode='bilinear', align_corners=True) + _p2 = _p3 + self.lateral_block3(x2) + + patches_batch = self.get_patches_batch(x, _p2) if self.split else x + _p2 = torch.cat((_p2, self.ipt_blk3(F.interpolate(patches_batch, size=x2.shape[2:], mode='bilinear', align_corners=True))), 1) + p2 = self.decoder_block2(_p2) + + p2_gdt = self.gdt_convs_2(p2) + gdt_attn_2 = self.gdt_convs_attn_2(p2_gdt).sigmoid() + p2 = p2 * gdt_attn_2 + + _p2 = F.interpolate(p2, size=x1.shape[2:], mode='bilinear', align_corners=True) + _p1 = _p2 + self.lateral_block2(x1) + + patches_batch = self.get_patches_batch(x, _p1) if self.split else x + _p1 = torch.cat((_p1, self.ipt_blk2(F.interpolate(patches_batch, size=x1.shape[2:], mode='bilinear', align_corners=True))), 1) + _p1 = self.decoder_block1(_p1) + _p1 = F.interpolate(_p1, size=x.shape[2:], mode='bilinear', align_corners=True) + + patches_batch = 
+        patches_batch = self.get_patches_batch(x, _p1) if self.split else x
+        _p1 = torch.cat((_p1, self.ipt_blk1(F.interpolate(patches_batch, size=x.shape[2:], mode='bilinear', align_corners=True))), 1)
+        p1_out = self.conv_out1(_p1)
+        return p1_out
+
+
+class SimpleConvs(nn.Module):
+    def __init__(
+        self, in_channels: int, out_channels: int, inter_channels=64, device=None, dtype=None, operations=None
+    ) -> None:
+        super().__init__()
+        self.conv1 = operations.Conv2d(in_channels, inter_channels, 3, 1, 1, device=device, dtype=dtype)
+        self.conv_out = operations.Conv2d(inter_channels, out_channels, 3, 1, 1, device=device, dtype=dtype)
+
+    def forward(self, x):
+        return self.conv_out(self.conv1(x))
diff --git a/comfy/bg_removal_model.py b/comfy/bg_removal_model.py
new file mode 100644
index 000000000..cb7c2ee53
--- /dev/null
+++ b/comfy/bg_removal_model.py
@@ -0,0 +1,78 @@
+from .utils import load_torch_file
+import os
+import json
+import torch
+import logging
+
+import comfy.ops
+import comfy.model_patcher
+import comfy.model_management
+import comfy.clip_model
+import comfy.background_removal.birefnet
+
+BG_REMOVAL_MODELS = {
+    "birefnet": comfy.background_removal.birefnet.BiRefNet
+}
+
+class BackgroundRemovalModel():
+    def __init__(self, json_config):
+        with open(json_config) as f:
+            config = json.load(f)
+
+        self.image_size = config.get("image_size", 1024)
+        self.image_mean = config.get("image_mean", [0.0, 0.0, 0.0])
+        self.image_std = config.get("image_std", [1.0, 1.0, 1.0])
+        self.model_type = config.get("model_type", "birefnet")
+        self.config = config.copy()
+        model_class = BG_REMOVAL_MODELS.get(self.model_type)
+
+        self.load_device = comfy.model_management.text_encoder_device()
+        offload_device = comfy.model_management.text_encoder_offload_device()
+        self.dtype = comfy.model_management.text_encoder_dtype(self.load_device)
+        self.model = model_class(config, self.dtype, offload_device, comfy.ops.manual_cast)
+        self.model.eval()
+
+        self.patcher = comfy.model_patcher.CoreModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device)
+
+    def load_sd(self, sd):
+        return self.model.load_state_dict(sd, strict=False, assign=self.patcher.is_dynamic())
+
+    def get_sd(self):
+        return self.model.state_dict()
+
+    def encode_image(self, image):
+        comfy.model_management.load_model_gpu(self.patcher)
+        H, W = image.shape[1], image.shape[2]
+        pixel_values = comfy.clip_model.clip_preprocess(image.to(self.load_device), size=self.image_size, mean=self.image_mean, std=self.image_std, crop=False)
+        out = self.model(pixel_values=pixel_values)
+        out = torch.nn.functional.interpolate(out, size=(H, W), mode="bicubic", antialias=False)
+
+        mask = out.sigmoid()
+        if mask.ndim == 3:
+            mask = mask.unsqueeze(0)
+        if mask.shape[1] != 1:
+            mask = mask.movedim(-1, 1)
+
+        return mask
+
+
+def load_background_removal_model(sd):
+    if "bb.layers.1.blocks.0.attn.relative_position_index" in sd:
+        json_config = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "background_removal"), "birefnet.json")
+    else:
+        return None
+
+    bg_model = BackgroundRemovalModel(json_config)
+    m, u = bg_model.load_sd(sd)
+    if len(m) > 0:
+        logging.warning("missing background removal: {}".format(m))
+    u = set(u)
+    keys = list(sd.keys())
+    for k in keys:
+        if k not in u:
+            sd.pop(k)
+    return bg_model
+
+def load(ckpt_path):
+    sd = load_torch_file(ckpt_path)
+    return load_background_removal_model(sd)
diff --git a/comfy/ops.py b/comfy/ops.py
index 585c185a3..77ad1d527 100644
--- a/comfy/ops.py
+++ b/comfy/ops.py
@@ -562,6 +562,25 @@ class disable_weight_init:
             else:
                 return super().forward(*args, **kwargs)
 
+    class BatchNorm2d(torch.nn.BatchNorm2d, CastWeightBiasOp):
+        def reset_parameters(self):
+            return None
+
+        def forward_comfy_cast_weights(self, input):
+            weight, bias, offload_stream = cast_bias_weight(self, input, offloadable=True)
+            running_mean = self.running_mean.to(device=input.device, dtype=weight.dtype) if self.running_mean is not None else None
+            running_var = self.running_var.to(device=input.device, dtype=weight.dtype) if self.running_var is not None else None
+            x = torch.nn.functional.batch_norm(input, running_mean, running_var, weight, bias, self.training, self.momentum, self.eps)
+            uncast_bias_weight(self, weight, bias, offload_stream)
+            return x
+
+        def forward(self, *args, **kwargs):
+            run_every_op()
+            if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
+                return self.forward_comfy_cast_weights(*args, **kwargs)
+            else:
+                return super().forward(*args, **kwargs)
+
     class LayerNorm(torch.nn.LayerNorm, CastWeightBiasOp):
         def reset_parameters(self):
             return None
@@ -749,6 +768,9 @@ class manual_cast(disable_weight_init):
     class Conv3d(disable_weight_init.Conv3d):
         comfy_cast_weights = True
 
+    class BatchNorm2d(disable_weight_init.BatchNorm2d):
+        comfy_cast_weights = True
+
     class GroupNorm(disable_weight_init.GroupNorm):
         comfy_cast_weights = True
 
diff --git a/comfy_api/latest/_io.py b/comfy_api/latest/_io.py
index e50266bc5..5ed968960 100644
--- a/comfy_api/latest/_io.py
+++ b/comfy_api/latest/_io.py
@@ -17,6 +17,7 @@ if TYPE_CHECKING:
     from spandrel import ImageModelDescriptor
     from comfy.clip_vision import ClipVisionModel
    from comfy.clip_vision import Output as ClipVisionOutput_
+    from comfy.bg_removal_model import BackgroundRemovalModel
     from comfy.controlnet import ControlNet
     from comfy.hooks import HookGroup, HookKeyframeGroup
     from comfy.model_patcher import ModelPatcher
@@ -614,6 +615,11 @@ class Model(ComfyTypeIO):
     if TYPE_CHECKING:
         Type = ModelPatcher
 
+@comfytype(io_type="BACKGROUND_REMOVAL")
+class BackgroundRemoval(ComfyTypeIO):
+    if TYPE_CHECKING:
+        Type = BackgroundRemovalModel
+
 @comfytype(io_type="CLIP_VISION")
 class ClipVision(ComfyTypeIO):
     if TYPE_CHECKING:
@@ -2257,6 +2263,7 @@ __all__ = [
     "ModelPatch",
     "ClipVision",
     "ClipVisionOutput",
+    "BackgroundRemoval",
     "AudioEncoder",
     "AudioEncoderOutput",
     "StyleModel",
diff --git a/comfy_extras/nodes_bg_removal.py b/comfy_extras/nodes_bg_removal.py
new file mode 100644
index 000000000..8d046b8d4
--- /dev/null
+++ b/comfy_extras/nodes_bg_removal.py
@@ -0,0 +1,60 @@
+import folder_paths
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, IO
+from comfy.bg_removal_model import load
+
+
+class LoadBackgroundRemovalModel(IO.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        files = folder_paths.get_filename_list("background_removal")
+        return IO.Schema(
+            node_id="LoadBackgroundRemovalModel",
+            display_name="Load Background Removal Model",
+            category="loaders",
+            inputs=[
+                IO.Combo.Input("bg_removal_name", options=sorted(files), tooltip="The model used to remove backgrounds from images"),
+            ],
+            outputs=[
+                IO.BackgroundRemoval.Output("bg_model")
+            ]
+        )
+    @classmethod
+    def execute(cls, bg_removal_name):
+        path = folder_paths.get_full_path_or_raise("background_removal", bg_removal_name)
+        bg = load(path)
+        if bg is None:
+            raise RuntimeError("ERROR: background model file is invalid and does not contain a valid background removal model.")
+        return IO.NodeOutput(bg)
+
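+# Runs the loaded background removal model over an image batch and returns
+# the foreground mask computed by BackgroundRemovalModel.encode_image().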
+class RemoveBackground(IO.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return IO.Schema(
+            node_id="RemoveBackground",
+            display_name="Remove Background",
+            category="image/background removal",
+            inputs=[
+                IO.Image.Input("image", tooltip="Input image to remove the background from"),
+                IO.BackgroundRemoval.Input("bg_removal_model", tooltip="Background removal model used to generate the mask")
+            ],
+            outputs=[
+                IO.Mask.Output("mask", tooltip="Generated foreground mask")
+            ]
+        )
+    @classmethod
+    def execute(cls, image, bg_removal_model):
+        mask = bg_removal_model.encode_image(image)
+        return IO.NodeOutput(mask)
+
+class BackgroundRemovalExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
+        return [
+            LoadBackgroundRemovalModel,
+            RemoveBackground
+        ]
+
+
+async def comfy_entrypoint() -> BackgroundRemovalExtension:
+    return BackgroundRemovalExtension()
diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py
index 43a933dac..c9b2a84d9 100644
--- a/comfy_extras/nodes_mask.py
+++ b/comfy_extras/nodes_mask.py
@@ -40,10 +40,21 @@ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_sou
 
     inverse_mask = torch.ones_like(mask) - mask
 
-    source_portion = mask * source[..., :visible_height, :visible_width]
-    destination_portion = inverse_mask * destination[..., top:bottom, left:right]
+    source_rgb = source[:, :3, :visible_height, :visible_width]
+    dest_slice = destination[..., top:bottom, left:right]
+
+    if destination.shape[1] == 4:
+        if torch.max(dest_slice) == 0:
+            destination[:, :3, top:bottom, left:right] = source_rgb
+            destination[:, 3:4, top:bottom, left:right] = mask
+        else:
+            destination[:, :3, top:bottom, left:right] = (mask * source_rgb) + (inverse_mask * dest_slice[:, :3])
+            destination[:, 3:4, top:bottom, left:right] = torch.max(mask, dest_slice[:, 3:4])
+    else:
+        source_portion = mask * source_rgb
+        destination_portion = inverse_mask * dest_slice
+        destination[..., top:bottom, left:right] = source_portion + destination_portion
 
-    destination[..., top:bottom, left:right] = source_portion + destination_portion
     return destination
 
 class LatentCompositeMasked(IO.ComfyNode):
@@ -84,18 +95,23 @@ class ImageCompositeMasked(IO.ComfyNode):
             display_name="Image Composite Masked",
             category="image",
             inputs=[
-                IO.Image.Input("destination"),
                 IO.Image.Input("source"),
                 IO.Int.Input("x", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
                 IO.Int.Input("y", default=0, min=0, max=nodes.MAX_RESOLUTION, step=1),
                 IO.Boolean.Input("resize_source", default=False),
+                IO.Image.Input("destination", optional=True),
                 IO.Mask.Input("mask", optional=True),
             ],
             outputs=[IO.Image.Output()],
         )
 
     @classmethod
-    def execute(cls, destination, source, x, y, resize_source, mask = None) -> IO.NodeOutput:
+    def execute(cls, source, x, y, resize_source, destination = None, mask = None) -> IO.NodeOutput:
+        if destination is None: # transparent rgba
+            B, H, W, C = source.shape
+            destination = torch.zeros((B, H, W, 4), dtype=source.dtype, device=source.device)
+            if C == 3:
+                source = torch.nn.functional.pad(source, (0, 1), value=1.0)
         destination, source = node_helpers.image_alpha_fix(destination, source)
         destination = destination.clone().movedim(-1, 1)
         output = composite(destination, source.movedim(-1, 1), x, y, mask, 1, resize_source).movedim(1, -1)
@@ -381,7 +397,6 @@ class GrowMask(IO.ComfyNode):
 
     expand_mask = execute # TODO: remove
 
-
 class ThresholdMask(IO.ComfyNode):
     @classmethod
     def define_schema(cls):
diff --git a/folder_paths.py b/folder_paths.py
index 98d3b1880..92e8df3cf 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -52,6 +52,8 @@ folder_names_and_paths["model_patches"] = ([os.path.join(models_dir, "model_patc
 folder_names_and_paths["audio_encoders"] = ([os.path.join(models_dir, "audio_encoders")], supported_pt_extensions)
 
+folder_names_and_paths["background_removal"] = ([os.path.join(models_dir, "background_removal")], supported_pt_extensions)
+
 folder_names_and_paths["frame_interpolation"] = ([os.path.join(models_dir, "frame_interpolation")], supported_pt_extensions)
 
 folder_names_and_paths["optical_flow"] = ([os.path.join(models_dir, "optical_flow")], supported_pt_extensions)
diff --git a/models/background_removal/put_background_removal_models_here b/models/background_removal/put_background_removal_models_here
new file mode 100644
index 000000000..e69de29bb
diff --git a/nodes.py b/nodes.py
index ae9e70cb9..5755f0bb8 100644
--- a/nodes.py
+++ b/nodes.py
@@ -2429,6 +2429,7 @@ async def init_builtin_extra_nodes():
         "nodes_number_convert.py",
         "nodes_painter.py",
         "nodes_curve.py",
+        "nodes_bg_removal.py",
         "nodes_rtdetr.py",
         "nodes_frame_interpolation.py",
         "nodes_sam3.py",