Merge branch 'master' into cursor/mark-deprecated-cloud-endpoints-e81e
Some checks are pending
Python Linting / Run Ruff (push) Waiting to run
Python Linting / Run Pylint (push) Waiting to run
Build package / Build Test (3.10) (push) Waiting to run
Build package / Build Test (3.11) (push) Waiting to run
Build package / Build Test (3.12) (push) Waiting to run
Build package / Build Test (3.13) (push) Waiting to run
Build package / Build Test (3.14) (push) Waiting to run

This commit is contained in:
Matt Miller 2026-05-11 11:45:42 -07:00 committed by GitHub
commit 3beca6e30e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
87 changed files with 23875 additions and 725 deletions

View File

@ -431,9 +431,10 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Adjusts image brightness and contrast using a real-time GPU fragment shader."
}
]
},
"extra": {}
}
}

View File

@ -162,7 +162,7 @@
},
"revision": 0,
"config": {},
"name": "local-Canny to Image (Z-Image-Turbo)",
"name": "Canny to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -1553,7 +1553,8 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Canny to image"
"category": "Image generation and editing/Canny to image",
"description": "Generates an image from a Canny edge map using Z-Image-Turbo, with text conditioning."
}
]
},
@ -1574,4 +1575,4 @@
}
},
"version": 0.4
}
}

View File

@ -192,7 +192,7 @@
},
"revision": 0,
"config": {},
"name": "local-Canny to Video (LTX 2.0)",
"name": "Canny to Video (LTX 2.0)",
"inputNode": {
"id": -10,
"bounding": [
@ -3600,7 +3600,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Canny to video"
"category": "Video generation and editing/Canny to video",
"description": "Generates video from Canny edge maps using LTX-2, with optional synchronized audio."
}
]
},
@ -3616,4 +3617,4 @@
}
},
"version": 0.4
}
}

View File

@ -377,8 +377,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Adds lens-style chromatic aberration (color fringing) using a real-time GPU fragment shader."
}
]
}
}
}

View File

@ -596,7 +596,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Adjusts saturation, temperature, tint, and vibrance using a real-time GPU fragment shader."
}
]
}

View File

@ -1129,7 +1129,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Balances colors across shadows, midtones, and highlights using a real-time GPU fragment shader."
}
]
}

View File

@ -608,7 +608,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Fine-tunes tone and color with per-channel curve adjustments using a real-time GPU fragment shader."
}
]
}

File diff suppressed because it is too large Load Diff

View File

@ -1609,7 +1609,8 @@
}
],
"extra": {},
"category": "Image Tools/Crop"
"category": "Image Tools/Crop",
"description": "Splits an image into a 2×2 grid of four equal tiles."
}
]
},

View File

@ -2946,7 +2946,8 @@
}
],
"extra": {},
"category": "Image Tools/Crop"
"category": "Image Tools/Crop",
"description": "Splits an image into a 3×3 grid of nine equal tiles."
}
]
},

View File

@ -1579,7 +1579,8 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Depth to image"
"category": "Image generation and editing/Depth to image",
"description": "Generates an image from a depth map using Z-Image-Turbo with text conditioning."
},
{
"id": "458bdf3c-4b58-421c-af50-c9c663a4d74c",
@ -2461,7 +2462,8 @@
]
},
"workflowRendererVersion": "LG"
}
},
"description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model."
}
]
},

View File

@ -4233,7 +4233,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Depth to video"
"category": "Video generation and editing/Depth to video",
"description": "Generates depth-controlled video with LTX-2: motion and structure follow a depth-reference video alongside text prompting, optional first-frame image conditioning, with optional synchronized audio."
},
{
"id": "38b60539-50a7-42f9-a5fe-bdeca26272e2",
@ -5192,7 +5193,8 @@
],
"extra": {
"workflowRendererVersion": "LG"
}
},
"description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model."
}
]
},

View File

@ -450,9 +450,10 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Blur"
"category": "Image Tools/Blur",
"description": "Applies bilateral (edge-preserving) blur to soften images while retaining detail."
}
]
},
"extra": {}
}
}

View File

@ -580,8 +580,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Adds procedural film grain texture for a cinematic look via GPU fragment shader."
}
]
}
}
}

View File

@ -3350,7 +3350,8 @@
}
],
"extra": {},
"category": "Video generation and editing/First-Last-Frame to Video"
"category": "Video generation and editing/First-Last-Frame to Video",
"description": "Generates a video interpolating between first and last keyframes using LTX-2.3."
}
]
},

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,858 @@
{
"revision": 0,
"last_node_id": 16,
"last_link_id": 0,
"nodes": [
{
"id": 16,
"type": "022693be-2baa-4009-870a-28921508a7ef",
"pos": [
-2990,
-3240
],
"size": [
410,
200
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "video",
"name": "video",
"type": "VIDEO",
"link": null
},
{
"label": "multiplier",
"name": "value",
"type": "INT",
"widget": {
"name": "value"
},
"link": null
},
{
"label": "enable_fps_multiplier",
"name": "value_1",
"type": "BOOLEAN",
"widget": {
"name": "value_1"
},
"link": null
},
{
"name": "model_name",
"type": "COMBO",
"widget": {
"name": "model_name"
},
"link": null
}
],
"outputs": [
{
"label": "VIDEO",
"name": "VIDEO_1",
"type": "VIDEO",
"links": []
},
{
"name": "IMAGE",
"type": "IMAGE",
"links": null
}
],
"properties": {
"proxyWidgets": [
[
"9",
"value"
],
[
"13",
"value"
],
[
"1",
"model_name"
]
],
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
},
"widgets_values": [],
"title": "Frame Interpolation"
}
],
"links": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "022693be-2baa-4009-870a-28921508a7ef",
"version": 1,
"state": {
"lastGroupId": 0,
"lastNodeId": 17,
"lastLinkId": 28,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "Frame Interpolation",
"inputNode": {
"id": -10,
"bounding": [
-2810,
-3070,
159.7421875,
120
]
},
"outputNode": {
"id": -20,
"bounding": [
-1270,
-3075,
120,
80
]
},
"inputs": [
{
"id": "05e31c51-dcb6-4a1e-9651-1b9ad4f7a287",
"name": "video",
"type": "VIDEO",
"linkIds": [
2
],
"localized_name": "video",
"pos": [
-2670.2578125,
-3050
]
},
{
"id": "feecb409-7d1c-4a99-9c63-50c5fecdd3c9",
"name": "value",
"type": "INT",
"linkIds": [
22
],
"label": "multiplier",
"pos": [
-2670.2578125,
-3030
]
},
{
"id": "0b8a861b-b581-4068-9e8c-f8d15daf1ca6",
"name": "value_1",
"type": "BOOLEAN",
"linkIds": [
23
],
"label": "enable_fps_multiplier",
"pos": [
-2670.2578125,
-3010
]
},
{
"id": "a22b101e-8773-4e17-a297-7ee3aae09162",
"name": "model_name",
"type": "COMBO",
"linkIds": [
24
],
"pos": [
-2670.2578125,
-2990
]
}
],
"outputs": [
{
"id": "ef2ada05-d5aa-492a-9394-6c3e71e39ebb",
"name": "VIDEO_1",
"type": "VIDEO",
"linkIds": [
26
],
"label": "VIDEO",
"pos": [
-1250,
-3055
]
},
{
"id": "5aacc622-2a07-4983-b31c-e04461f7f953",
"name": "IMAGE",
"type": "IMAGE",
"linkIds": [
28
],
"pos": [
-1250,
-3035
]
}
],
"widgets": [],
"nodes": [
{
"id": 1,
"type": "FrameInterpolationModelLoader",
"pos": [
-2510,
-3370
],
"size": [
370,
90
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"localized_name": "model_name",
"name": "model_name",
"type": "COMBO",
"widget": {
"name": "model_name"
},
"link": 24
}
],
"outputs": [
{
"localized_name": "INTERP_MODEL",
"name": "INTERP_MODEL",
"type": "INTERP_MODEL",
"links": [
1
]
}
],
"properties": {
"Node name for S&R": "FrameInterpolationModelLoader",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3",
"models": [
{
"name": "film_net_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/frame_interpolation/resolve/main/frame_interpolation/film_net_fp16.safetensors",
"directory": "frame_interpolation"
}
]
},
"widgets_values": [
"film_net_fp16.safetensors"
]
},
{
"id": 2,
"type": "FrameInterpolate",
"pos": [
-2040,
-3370
],
"size": [
270,
110
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"localized_name": "interp_model",
"name": "interp_model",
"type": "INTERP_MODEL",
"link": 1
},
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"link": 3
},
{
"localized_name": "multiplier",
"name": "multiplier",
"type": "INT",
"widget": {
"name": "multiplier"
},
"link": 8
}
],
"outputs": [
{
"localized_name": "IMAGE",
"name": "IMAGE",
"type": "IMAGE",
"links": [
4,
28
]
}
],
"properties": {
"Node name for S&R": "FrameInterpolate",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
},
"widgets_values": [
2
]
},
{
"id": 5,
"type": "CreateVideo",
"pos": [
-1600,
-3370
],
"size": [
270,
110
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"link": 4
},
{
"localized_name": "audio",
"name": "audio",
"shape": 7,
"type": "AUDIO",
"link": 5
},
{
"localized_name": "fps",
"name": "fps",
"type": "FLOAT",
"widget": {
"name": "fps"
},
"link": 12
}
],
"outputs": [
{
"localized_name": "VIDEO",
"name": "VIDEO",
"type": "VIDEO",
"links": [
26
]
}
],
"properties": {
"Node name for S&R": "CreateVideo",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
},
"widgets_values": [
30
]
},
{
"id": 9,
"type": "PrimitiveInt",
"pos": [
-2500,
-2970
],
"size": [
270,
90
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"localized_name": "value",
"name": "value",
"type": "INT",
"widget": {
"name": "value"
},
"link": 22
}
],
"outputs": [
{
"localized_name": "INT",
"name": "INT",
"type": "INT",
"links": [
8,
19
]
}
],
"title": "Int (Multiplier)",
"properties": {
"Node name for S&R": "PrimitiveInt",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
},
"widgets_values": [
2,
"fixed"
]
},
{
"id": 10,
"type": "ComfySwitchNode",
"pos": [
-1610,
-3120
],
"size": [
270,
130
],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"localized_name": "on_false",
"name": "on_false",
"type": "*",
"link": 11
},
{
"localized_name": "on_true",
"name": "on_true",
"type": "*",
"link": 13
},
{
"localized_name": "switch",
"name": "switch",
"type": "BOOLEAN",
"widget": {
"name": "switch"
},
"link": 15
}
],
"outputs": [
{
"localized_name": "output",
"name": "output",
"type": "*",
"links": [
12
]
}
],
"properties": {
"Node name for S&R": "ComfySwitchNode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
},
"widgets_values": [
true
]
},
{
"id": 13,
"type": "PrimitiveBoolean",
"pos": [
-2500,
-2770
],
"size": [
310,
90
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"localized_name": "value",
"name": "value",
"type": "BOOLEAN",
"widget": {
"name": "value"
},
"link": 23
}
],
"outputs": [
{
"localized_name": "BOOLEAN",
"name": "BOOLEAN",
"type": "BOOLEAN",
"links": [
15
]
}
],
"title": "Boolean (Apply multiplier to FPS?)",
"properties": {
"Node name for S&R": "PrimitiveBoolean",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
},
"widgets_values": [
true
]
},
{
"id": 3,
"type": "GetVideoComponents",
"pos": [
-2500,
-3170
],
"size": [
230,
100
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "video",
"name": "video",
"type": "VIDEO",
"link": 2
}
],
"outputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"links": [
3
]
},
{
"localized_name": "audio",
"name": "audio",
"type": "AUDIO",
"links": [
5
]
},
{
"localized_name": "fps",
"name": "fps",
"type": "FLOAT",
"links": [
11,
18
]
}
],
"properties": {
"Node name for S&R": "GetVideoComponents",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
}
},
{
"id": 11,
"type": "ComfyMathExpression",
"pos": [
-2090,
-3070
],
"size": [
400,
210
],
"flags": {
"collapsed": false
},
"order": 6,
"mode": 0,
"inputs": [
{
"label": "a",
"localized_name": "values.a",
"name": "values.a",
"type": "FLOAT,INT",
"link": 18
},
{
"label": "b",
"localized_name": "values.b",
"name": "values.b",
"shape": 7,
"type": "FLOAT,INT",
"link": 19
},
{
"label": "c",
"localized_name": "values.c",
"name": "values.c",
"shape": 7,
"type": "FLOAT,INT",
"link": null
},
{
"localized_name": "expression",
"name": "expression",
"type": "STRING",
"widget": {
"name": "expression"
},
"link": null
}
],
"outputs": [
{
"localized_name": "FLOAT",
"name": "FLOAT",
"type": "FLOAT",
"links": [
13
]
},
{
"localized_name": "INT",
"name": "INT",
"type": "INT",
"links": null
}
],
"properties": {
"Node name for S&R": "ComfyMathExpression",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"cnr_id": "comfy-core",
"ver": "0.19.3"
},
"widgets_values": [
"min(abs(b), 16) * a"
]
}
],
"groups": [],
"links": [
{
"id": 1,
"origin_id": 1,
"origin_slot": 0,
"target_id": 2,
"target_slot": 0,
"type": "INTERP_MODEL"
},
{
"id": 3,
"origin_id": 3,
"origin_slot": 0,
"target_id": 2,
"target_slot": 1,
"type": "IMAGE"
},
{
"id": 8,
"origin_id": 9,
"origin_slot": 0,
"target_id": 2,
"target_slot": 2,
"type": "INT"
},
{
"id": 4,
"origin_id": 2,
"origin_slot": 0,
"target_id": 5,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 5,
"origin_id": 3,
"origin_slot": 1,
"target_id": 5,
"target_slot": 1,
"type": "AUDIO"
},
{
"id": 12,
"origin_id": 10,
"origin_slot": 0,
"target_id": 5,
"target_slot": 2,
"type": "FLOAT"
},
{
"id": 11,
"origin_id": 3,
"origin_slot": 2,
"target_id": 10,
"target_slot": 0,
"type": "FLOAT"
},
{
"id": 13,
"origin_id": 11,
"origin_slot": 0,
"target_id": 10,
"target_slot": 1,
"type": "FLOAT"
},
{
"id": 15,
"origin_id": 13,
"origin_slot": 0,
"target_id": 10,
"target_slot": 2,
"type": "BOOLEAN"
},
{
"id": 18,
"origin_id": 3,
"origin_slot": 2,
"target_id": 11,
"target_slot": 0,
"type": "FLOAT"
},
{
"id": 19,
"origin_id": 9,
"origin_slot": 0,
"target_id": 11,
"target_slot": 1,
"type": "INT"
},
{
"id": 2,
"origin_id": -10,
"origin_slot": 0,
"target_id": 3,
"target_slot": 0,
"type": "VIDEO"
},
{
"id": 22,
"origin_id": -10,
"origin_slot": 1,
"target_id": 9,
"target_slot": 0,
"type": "INT"
},
{
"id": 23,
"origin_id": -10,
"origin_slot": 2,
"target_id": 13,
"target_slot": 0,
"type": "BOOLEAN"
},
{
"id": 24,
"origin_id": -10,
"origin_slot": 3,
"target_id": 1,
"target_slot": 0,
"type": "COMBO"
},
{
"id": 26,
"origin_id": 5,
"origin_slot": 0,
"target_id": -20,
"target_slot": 0,
"type": "VIDEO"
},
{
"id": 28,
"origin_id": 2,
"origin_slot": 0,
"target_id": -20,
"target_slot": 1,
"type": "IMAGE"
}
],
"extra": {},
"category": "Video Tools",
"description": "Increases video frame rate by synthesizing intermediate frames with a frame interpolation model."
}
]
},
"extra": {}
}

View File

@ -0,0 +1,485 @@
{
"revision": 0,
"last_node_id": 98,
"last_link_id": 0,
"nodes": [
{
"id": 98,
"type": "dca6e78d-fb06-421e-97f7-6ce17a665260",
"pos": [
-410,
-2230
],
"size": [
270,
104
],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "video",
"type": "VIDEO",
"link": null
},
{
"label": "frame_index",
"name": "value",
"type": "INT",
"widget": {
"name": "value"
},
"link": null
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": []
}
],
"title": "Get Any Video Frame",
"properties": {
"proxyWidgets": [
[
"100",
"value"
]
]
},
"widgets_values": []
}
],
"links": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "dca6e78d-fb06-421e-97f7-6ce17a665260",
"version": 1,
"state": {
"lastGroupId": 1,
"lastNodeId": 136,
"lastLinkId": 302,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "Get Any Video Frame",
"inputNode": {
"id": -10,
"bounding": [
380,
-57,
120,
80
]
},
"outputNode": {
"id": -20,
"bounding": [
1460,
-57,
120,
60
]
},
"inputs": [
{
"id": "2ceec378-8dcf-4340-8570-155967f59a93",
"name": "video",
"type": "VIDEO",
"linkIds": [
4
],
"pos": [
480,
-37
]
},
{
"id": "819955f6-c686-4896-8032-ff2d0059109a",
"name": "value",
"type": "INT",
"linkIds": [
283
],
"label": "frame_index",
"pos": [
480,
-17
]
}
],
"outputs": [
{
"id": "1ab0684d-6a44-45b6-8aa4-a0b971a1d41e",
"name": "IMAGE",
"type": "IMAGE",
"linkIds": [
5
],
"pos": [
1480,
-37
]
}
],
"widgets": [],
"nodes": [
{
"id": 1,
"type": "GetVideoComponents",
"pos": [
560,
-150
],
"size": [
230,
120
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"localized_name": "video",
"name": "video",
"type": "VIDEO",
"link": 4
}
],
"outputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"links": [
1,
2
]
},
{
"localized_name": "audio",
"name": "audio",
"type": "AUDIO",
"links": null
},
{
"localized_name": "fps",
"name": "fps",
"type": "FLOAT",
"links": null
}
],
"properties": {
"Node name for S&R": "GetVideoComponents"
}
},
{
"id": 2,
"type": "GetImageSize",
"pos": [
560,
50
],
"size": [
230,
120
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": 1
}
],
"outputs": [
{
"localized_name": "width",
"name": "width",
"type": "INT",
"links": null
},
{
"localized_name": "height",
"name": "height",
"type": "INT",
"links": null
},
{
"localized_name": "batch_size",
"name": "batch_size",
"type": "INT",
"links": [
285
]
}
],
"properties": {
"Node name for S&R": "GetImageSize"
}
},
{
"id": 3,
"type": "ImageFromBatch",
"pos": [
1130,
-150
],
"size": [
270,
140
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": 2
},
{
"localized_name": "batch_index",
"name": "batch_index",
"type": "INT",
"widget": {
"name": "batch_index"
},
"link": 286
},
{
"localized_name": "length",
"name": "length",
"type": "INT",
"widget": {
"name": "length"
},
"link": null
}
],
"outputs": [
{
"localized_name": "IMAGE",
"name": "IMAGE",
"type": "IMAGE",
"links": [
5
]
}
],
"properties": {
"Node name for S&R": "ImageFromBatch"
},
"widgets_values": [
0,
1
]
},
{
"id": 99,
"type": "ComfyMathExpression",
"pos": [
910,
100
],
"size": [
400,
200
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"label": "a",
"localized_name": "values.a",
"name": "values.a",
"type": "FLOAT,INT",
"link": 284
},
{
"label": "b",
"localized_name": "values.b",
"name": "values.b",
"shape": 7,
"type": "FLOAT,INT",
"link": 285
},
{
"label": "c",
"localized_name": "values.c",
"name": "values.c",
"shape": 7,
"type": "FLOAT,INT",
"link": null
},
{
"localized_name": "expression",
"name": "expression",
"type": "STRING",
"widget": {
"name": "expression"
},
"link": null
}
],
"outputs": [
{
"localized_name": "FLOAT",
"name": "FLOAT",
"type": "FLOAT",
"links": null
},
{
"localized_name": "INT",
"name": "INT",
"type": "INT",
"links": [
286
]
}
],
"properties": {
"Node name for S&R": "ComfyMathExpression"
},
"widgets_values": [
"min(max(int(a if a >= 0 else b + a), 0), b - 1)"
]
},
{
"id": 100,
"type": "PrimitiveInt",
"pos": [
560,
250
],
"size": [
270,
110
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"localized_name": "value",
"name": "value",
"type": "INT",
"widget": {
"name": "value"
},
"link": 283
}
],
"outputs": [
{
"localized_name": "INT",
"name": "INT",
"type": "INT",
"links": [
284
]
}
],
"properties": {
"Node name for S&R": "PrimitiveInt"
},
"widgets_values": [
0,
"fixed"
]
}
],
"groups": [],
"links": [
{
"id": 1,
"origin_id": 1,
"origin_slot": 0,
"target_id": 2,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 2,
"origin_id": 1,
"origin_slot": 0,
"target_id": 3,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 4,
"origin_id": -10,
"origin_slot": 0,
"target_id": 1,
"target_slot": 0,
"type": "VIDEO"
},
{
"id": 5,
"origin_id": 3,
"origin_slot": 0,
"target_id": -20,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 283,
"origin_id": -10,
"origin_slot": 1,
"target_id": 100,
"target_slot": 0,
"type": "INT"
},
{
"id": 284,
"origin_id": 100,
"origin_slot": 0,
"target_id": 99,
"target_slot": 0,
"type": "INT"
},
{
"id": 285,
"origin_id": 2,
"origin_slot": 2,
"target_id": 99,
"target_slot": 1,
"type": "INT"
},
{
"id": 286,
"origin_id": 99,
"origin_slot": 1,
"target_id": 3,
"target_slot": 1,
"type": "INT"
}
],
"extra": {},
"category": "Video Tools",
      "description": "Extracts one image frame from a video at a chosen index; negative indices count from the end, and out-of-range values are clamped to the valid frame range."
}
]
},
"extra": {
"ds": {
"scale": 1.197015527856339,
"offset": [
-168.76833554248222,
540.6638955283997
]
},
"frontendVersion": "1.42.8"
}
}

View File

@ -575,8 +575,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Adds a glow/bloom effect around bright image areas via GPU fragment shader."
}
]
}
}
}

View File

@ -752,8 +752,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Adjusts hue, saturation, and lightness of an image using a real-time GPU fragment shader."
}
]
}
}
}

View File

@ -374,7 +374,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Blur"
"category": "Image Tools/Blur",
"description": "Applies Gaussian, Box, or Radial blur to soften images and create stylized depth or motion effects."
}
]
}

View File

@ -310,7 +310,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Text generation/Image Captioning"
"category": "Text generation/Image Captioning",
"description": "Generates descriptive captions for images using Google's Gemini multimodal LLM."
}
]
}

View File

@ -315,8 +315,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Manipulates individual RGBA channels for masking, compositing, and channel effects."
}
]
}
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1472,7 +1472,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Edit image"
"category": "Image generation and editing/Edit image",
"description": "Edits an input image via text instructions using FLUX.2 [klein] 4B."
},
{
"id": "6007e698-2ebd-4917-84d8-299b35d7b7ab",
@ -1821,7 +1822,8 @@
],
"extra": {
"workflowRendererVersion": "LG"
}
},
"description": "Applies reference image conditioning for style/identity transfer (Flux.2 Klein 4B)."
}
]
},
@ -1837,4 +1839,4 @@
}
},
"version": 0.4
}
}

View File

@ -1417,7 +1417,8 @@
}
],
"extra": {},
"category": "Image generation and editing/Edit image"
"category": "Image generation and editing/Edit image",
"description": "Edits images via text instructions using LongCat Image Edit, an instruction-following image editing diffusion model."
}
]
},

File diff suppressed because it is too large Load Diff

View File

@ -132,7 +132,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Edit (Qwen 2511)",
"name": "Image Edit (Qwen 2511)",
"inputNode": {
"id": -10,
"bounding": [
@ -1468,7 +1468,8 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Edit image"
"category": "Image generation and editing/Edit image",
"description": "Edits images via text instructions using Qwen-Image-Edit-2511 with improved character consistency and integrated LoRA."
}
]
},
@ -1489,4 +1490,4 @@
}
},
"version": 0.4
}
}

View File

@ -1188,7 +1188,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Inpaint image"
"category": "Image generation and editing/Inpaint image",
"description": "Inpaints masked image regions using Flux.1 fill [dev], Black Forest Labs' inpainting/outpainting model."
}
]
},
@ -1202,4 +1203,4 @@
},
"ue_links": []
}
}
}

View File

@ -1548,7 +1548,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Inpaint image"
"category": "Image generation and editing/Inpaint image",
"description": "Inpaints masked regions using Qwen-Image, extending its multilingual text rendering to inpainting tasks."
},
{
"id": "56a1f603-fbd2-40ed-94ef-c9ecbd96aca8",
@ -1907,7 +1908,8 @@
],
"extra": {
"workflowRendererVersion": "LG"
}
},
"description": "Expands and softens mask edges to reduce visible seams after image processing."
}
]
},

View File

@ -742,9 +742,10 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Color adjust"
"category": "Image Tools/Color adjust",
"description": "Adjusts black point, white point, and gamma for tonal range control via GPU shader."
}
]
},
"extra": {}
}
}

View File

@ -1919,7 +1919,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Outpaint image"
"category": "Image generation and editing/Outpaint image",
"description": "Outpaints beyond image boundaries using Qwen-Image's outpainting capabilities."
},
{
"id": "f93c215e-c393-460e-9534-ed2c3d8a652e",
@ -2278,7 +2279,8 @@
],
"extra": {
"workflowRendererVersion": "LG"
}
},
"description": "Expands and softens mask edges to reduce visible seams after image processing."
},
{
"id": "2a4b2cc0-db37-4302-a067-da392f38f06b",
@ -2733,7 +2735,8 @@
],
"extra": {
"workflowRendererVersion": "LG"
}
},
"description": "Scales both image and mask together while preserving alignment for editing workflows."
}
]
},

View File

@ -0,0 +1,714 @@
{
"revision": 0,
"last_node_id": 99,
"last_link_id": 0,
"nodes": [
{
"id": 99,
"type": "6e7ab3ea-96aa-470f-9b94-3d9d0e01f481",
"pos": [
-1630,
-3270
],
"size": [
290,
370
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"label": "image",
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": null
},
{
"label": "object",
"name": "text",
"type": "STRING",
"widget": {
"name": "text"
},
"link": null
},
{
"name": "bboxes",
"type": "BOUNDING_BOX",
"link": null
},
{
"name": "positive_coords",
"type": "STRING",
"link": null
},
{
"name": "negative_coords",
"type": "STRING",
"link": null
},
{
"name": "threshold",
"type": "FLOAT",
"widget": {
"name": "threshold"
},
"link": null
},
{
"name": "refine_iterations",
"type": "INT",
"widget": {
"name": "refine_iterations"
},
"link": null
},
{
"name": "individual_masks",
"type": "BOOLEAN",
"widget": {
"name": "individual_masks"
},
"link": null
},
{
"name": "ckpt_name",
"type": "COMBO",
"widget": {
"name": "ckpt_name"
},
"link": null
}
],
"outputs": [
{
"localized_name": "masks",
"name": "masks",
"type": "MASK",
"links": []
},
{
"localized_name": "bboxes",
"name": "bboxes",
"type": "BOUNDING_BOX",
"links": []
}
],
"properties": {
"proxyWidgets": [
[
"78",
"text"
],
[
"75",
"threshold"
],
[
"75",
"refine_iterations"
],
[
"75",
"individual_masks"
],
[
"77",
"ckpt_name"
]
],
"ue_properties": {
"widget_ue_connectable": {
"text": true
},
"version": "7.7",
"input_ue_unconnectable": {}
},
"cnr_id": "comfy-core",
"ver": "0.19.3",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [],
"title": "Image Segmentation (SAM3)"
}
],
"links": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "6e7ab3ea-96aa-470f-9b94-3d9d0e01f481",
"version": 1,
"state": {
"lastGroupId": 0,
"lastNodeId": 113,
"lastLinkId": 283,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "Image Segmentation (SAM3)",
"inputNode": {
"id": -10,
"bounding": [
-2260,
-3450,
136.369140625,
220
]
},
"outputNode": {
"id": -20,
"bounding": [
-1130,
-3305,
120,
80
]
},
"inputs": [
{
"id": "a6e75fa2-162a-4af0-a2fd-1e9c899a5ab6",
"name": "image",
"type": "IMAGE",
"linkIds": [
264
],
"localized_name": "image",
"label": "image",
"pos": [
-2143.630859375,
-3430
]
},
{
"id": "3cefd304-7631-4ff6-a5a0-5a0ffb120745",
"name": "text",
"type": "STRING",
"linkIds": [
265
],
"label": "object",
"pos": [
-2143.630859375,
-3410
]
},
{
"id": "1aec91c5-d8d2-441c-928c-49c14e7e80ed",
"name": "bboxes",
"type": "BOUNDING_BOX",
"linkIds": [
266
],
"pos": [
-2143.630859375,
-3390
]
},
{
"id": "1ec7ce1a-8257-4719-8a81-60ebc8a98899",
"name": "positive_coords",
"type": "STRING",
"linkIds": [
267
],
"pos": [
-2143.630859375,
-3370
]
},
{
"id": "c65f8b87-9bd7-48be-9fc2-823431e95019",
"name": "negative_coords",
"type": "STRING",
"linkIds": [
268
],
"pos": [
-2143.630859375,
-3350
]
},
{
"id": "bb4ba35a-ccfe-4c37-98e5-d9b0d69585fb",
"name": "threshold",
"type": "FLOAT",
"linkIds": [
269
],
"pos": [
-2143.630859375,
-3330
]
},
{
"id": "b1439668-b050-490b-a5dc-fc4052c55666",
"name": "refine_iterations",
"type": "INT",
"linkIds": [
270
],
"pos": [
-2143.630859375,
-3310
]
},
{
"id": "86e239e5-c098-4302-b54d-d42a38bc0f89",
"name": "individual_masks",
"type": "BOOLEAN",
"linkIds": [
271
],
"pos": [
-2143.630859375,
-3290
]
},
{
"id": "f9e0b9d4-b2f1-4907-a4a5-305656576706",
"name": "ckpt_name",
"type": "COMBO",
"linkIds": [
272
],
"pos": [
-2143.630859375,
-3270
]
}
],
"outputs": [
{
"id": "ff50da09-1e59-4a58-9b7f-be1a00aa5913",
"name": "masks",
"type": "MASK",
"linkIds": [
231
],
"localized_name": "masks",
"pos": [
-1110,
-3285
]
},
{
"id": "8f622e40-8528-4078-b7d3-147e9f872194",
"name": "bboxes",
"type": "BOUNDING_BOX",
"linkIds": [
232
],
"localized_name": "bboxes",
"pos": [
-1110,
-3265
]
}
],
"widgets": [],
"nodes": [
{
"id": 75,
"type": "SAM3_Detect",
"pos": [
-1470,
-3460
],
"size": [
270,
260
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"label": "model",
"localized_name": "model",
"name": "model",
"type": "MODEL",
"link": 237
},
{
"label": "image",
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": 264
},
{
"label": "conditioning",
"localized_name": "conditioning",
"name": "conditioning",
"shape": 7,
"type": "CONDITIONING",
"link": 200
},
{
"label": "bboxes",
"localized_name": "bboxes",
"name": "bboxes",
"shape": 7,
"type": "BOUNDING_BOX",
"link": 266
},
{
"label": "positive_coords",
"localized_name": "positive_coords",
"name": "positive_coords",
"shape": 7,
"type": "STRING",
"link": 267
},
{
"label": "negative_coords",
"localized_name": "negative_coords",
"name": "negative_coords",
"shape": 7,
"type": "STRING",
"link": 268
},
{
"localized_name": "threshold",
"name": "threshold",
"type": "FLOAT",
"widget": {
"name": "threshold"
},
"link": 269
},
{
"localized_name": "refine_iterations",
"name": "refine_iterations",
"type": "INT",
"widget": {
"name": "refine_iterations"
},
"link": 270
},
{
"localized_name": "individual_masks",
"name": "individual_masks",
"type": "BOOLEAN",
"widget": {
"name": "individual_masks"
},
"link": 271
}
],
"outputs": [
{
"localized_name": "masks",
"name": "masks",
"type": "MASK",
"links": [
231
]
},
{
"localized_name": "bboxes",
"name": "bboxes",
"type": "BOUNDING_BOX",
"links": [
232
]
}
],
"properties": {
"ue_properties": {
"widget_ue_connectable": {},
"version": "7.7",
"input_ue_unconnectable": {}
},
"cnr_id": "comfy-core",
"ver": "0.19.3",
"Node name for S&R": "SAM3_Detect",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
0.5,
2,
false
]
},
{
"id": 77,
"type": "CheckpointLoaderSimple",
"pos": [
-1970,
-3200
],
"size": [
330,
140
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"localized_name": "ckpt_name",
"name": "ckpt_name",
"type": "COMBO",
"widget": {
"name": "ckpt_name"
},
"link": 272
}
],
"outputs": [
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"links": [
237
]
},
{
"localized_name": "CLIP",
"name": "CLIP",
"type": "CLIP",
"links": [
240
]
},
{
"localized_name": "VAE",
"name": "VAE",
"type": "VAE",
"links": null
}
],
"properties": {
"ue_properties": {
"widget_ue_connectable": {},
"version": "7.7",
"input_ue_unconnectable": {}
},
"cnr_id": "comfy-core",
"ver": "0.19.3",
"Node name for S&R": "CheckpointLoaderSimple",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"models": [
{
"name": "sam3.1_multiplex_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/sam3.1/resolve/main/checkpoints/sam3.1_multiplex_fp16.safetensors",
"directory": "checkpoints"
}
]
},
"widgets_values": [
"sam3.1_multiplex_fp16.safetensors"
]
},
{
"id": 78,
"type": "CLIPTextEncode",
"pos": [
-2000,
-3000
],
"size": [
400,
200
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "clip",
"name": "clip",
"type": "CLIP",
"link": 240
},
{
"localized_name": "text",
"name": "text",
"type": "STRING",
"widget": {
"name": "text"
},
"link": 265
}
],
"outputs": [
{
"localized_name": "CONDITIONING",
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
200
]
}
],
"properties": {
"ue_properties": {
"widget_ue_connectable": {},
"version": "7.7",
"input_ue_unconnectable": {}
},
"cnr_id": "comfy-core",
"ver": "0.19.3",
"Node name for S&R": "CLIPTextEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
""
]
}
],
"groups": [],
"links": [
{
"id": 237,
"origin_id": 77,
"origin_slot": 0,
"target_id": 75,
"target_slot": 0,
"type": "MODEL"
},
{
"id": 200,
"origin_id": 78,
"origin_slot": 0,
"target_id": 75,
"target_slot": 2,
"type": "CONDITIONING"
},
{
"id": 240,
"origin_id": 77,
"origin_slot": 1,
"target_id": 78,
"target_slot": 0,
"type": "CLIP"
},
{
"id": 231,
"origin_id": 75,
"origin_slot": 0,
"target_id": -20,
"target_slot": 0,
"type": "MASK"
},
{
"id": 232,
"origin_id": 75,
"origin_slot": 1,
"target_id": -20,
"target_slot": 1,
"type": "BOUNDING_BOX"
},
{
"id": 264,
"origin_id": -10,
"origin_slot": 0,
"target_id": 75,
"target_slot": 1,
"type": "IMAGE"
},
{
"id": 265,
"origin_id": -10,
"origin_slot": 1,
"target_id": 78,
"target_slot": 1,
"type": "STRING"
},
{
"id": 266,
"origin_id": -10,
"origin_slot": 2,
"target_id": 75,
"target_slot": 3,
"type": "BOUNDING_BOX"
},
{
"id": 267,
"origin_id": -10,
"origin_slot": 3,
"target_id": 75,
"target_slot": 4,
"type": "STRING"
},
{
"id": 268,
"origin_id": -10,
"origin_slot": 4,
"target_id": 75,
"target_slot": 5,
"type": "STRING"
},
{
"id": 269,
"origin_id": -10,
"origin_slot": 5,
"target_id": 75,
"target_slot": 6,
"type": "FLOAT"
},
{
"id": 270,
"origin_id": -10,
"origin_slot": 6,
"target_id": 75,
"target_slot": 7,
"type": "INT"
},
{
"id": 271,
"origin_id": -10,
"origin_slot": 7,
"target_id": 75,
"target_slot": 8,
"type": "BOOLEAN"
},
{
"id": 272,
"origin_id": -10,
"origin_slot": 8,
"target_id": 77,
"target_slot": 0,
"type": "COMBO"
}
],
"extra": {},
"category": "Image Tools/Image Segmentation",
"description": "Segments images into masks using Meta SAM3 from text prompts, points, or boxes."
}
]
},
"extra": {
"ue_links": []
}
}

View File

@ -141,7 +141,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Upscale(Z-image-Turbo)",
"name": "Image Upscale (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -1302,7 +1302,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Enhance"
"category": "Image generation and editing/Enhance",
"description": "Upscales images to higher resolution using Z-Image-Turbo."
}
]
},

View File

@ -99,7 +99,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image to Depth Map (Lotus)",
"name": "Image to Depth Map (Lotus)",
"inputNode": {
"id": -10,
"bounding": [
@ -948,7 +948,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Depth to image"
"category": "Image generation and editing/Depth to image",
"description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model."
}
]
},
@ -964,4 +965,4 @@
"workflowRendererVersion": "LG"
},
"version": 0.4
}
}

View File

@ -1586,7 +1586,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Image to layers"
"category": "Image generation and editing/Image to layers",
"description": "Decomposes an image into variable-resolution RGBA layers for independent editing using Qwen-Image-Layered."
}
]
},

View File

@ -72,7 +72,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image to Model (Hunyuan3d 2.1)",
"name": "Image to 3D Model (Hunyuan3D 2.1)",
"inputNode": {
"id": -10,
"bounding": [
@ -765,7 +765,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "3D/Image to 3D Model"
"category": "3D/Image to 3D Model",
"description": "Generates 3D mesh models from a single input image using Hunyuan3D 2.0/2.1."
}
]
},

View File

@ -4223,7 +4223,8 @@
"extra": {
"workflowRendererVersion": "Vue-corrected"
},
"category": "Video generation and editing/Image to video"
"category": "Video generation and editing/Image to video",
"description": "Generates video from a single input image using LTX-2.3."
}
]
},

View File

@ -206,7 +206,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image to Video (Wan 2.2)",
"name": "Image to Video (Wan 2.2)",
"inputNode": {
"id": -10,
"bounding": [
@ -2027,7 +2027,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Image to video"
"category": "Video generation and editing/Image to video",
"description": "Image-to-video with Wan 2.2 using a start image plus text prompt to extend motion from the still frame."
}
]
},

View File

@ -134,7 +134,7 @@
},
"revision": 0,
"config": {},
"name": "local-Pose to Image (Z-Image-Turbo)",
"name": "Pose to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -1298,7 +1298,8 @@
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true
},
"category": "Image generation and editing/Pose to image"
"category": "Image generation and editing/Pose to image",
"description": "Generates an image from pose keypoints using Z-Image-Turbo with text conditioning."
}
]
},
@ -1319,4 +1320,4 @@
}
},
"version": 0.4
}
}

View File

@ -3870,7 +3870,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Pose to video"
"category": "Video generation and editing/Pose to video",
"description": "Generates video from pose reference frames using LTX-2, with optional synchronized audio."
}
]
},

View File

@ -270,9 +270,10 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Text generation/Prompt enhance"
"category": "Text generation/Prompt enhance",
"description": "Expands short text prompts into detailed descriptions using a text generation model for better generation quality."
}
]
},
"extra": {}
}
}

View File

@ -0,0 +1,397 @@
{
"revision": 0,
"last_node_id": 19,
"last_link_id": 0,
"nodes": [
{
"id": 19,
"type": "5b40ca21-ba1a-41d5-b403-4d2d7acdc195",
"pos": [
-6411.330578108367,
1940.2638932730042
],
"size": [
349.609375,
145.9375
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": null
},
{
"name": "bg_removal_name",
"type": "COMBO",
"widget": {
"name": "bg_removal_name"
},
"link": null
}
],
"outputs": [
{
"localized_name": "IMAGE",
"name": "IMAGE",
"type": "IMAGE",
"links": []
},
{
"name": "mask",
"type": "MASK",
"links": []
}
],
"properties": {
"proxyWidgets": [
[
"14",
"bg_removal_name"
]
]
},
"widgets_values": [],
"title": "Remove Background (BiRefNet)"
}
],
"links": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "5b40ca21-ba1a-41d5-b403-4d2d7acdc195",
"version": 1,
"state": {
"lastGroupId": 0,
"lastNodeId": 21,
"lastLinkId": 16,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "Remove Background (BiRefNet)",
"description": "Removes or replaces image backgrounds using BiRefNet segmentation and alpha compositing.",
"inputNode": {
"id": -10,
"bounding": [
-6728.534070722246,
1475.2619799128663,
150.9140625,
88
]
},
"outputNode": {
"id": -20,
"bounding": [
-6169.049695722246,
1475.2619799128663,
128,
88
]
},
"inputs": [
{
"id": "7bc321cd-df31-4c39-aaf7-7f0d01326189",
"name": "image",
"type": "IMAGE",
"linkIds": [
5,
7
],
"localized_name": "image",
"pos": [
-6601.620008222246,
1499.2619799128663
]
},
{
"id": "e89d2cd8-daa3-4e29-8a69-851db85072cb",
"name": "bg_removal_name",
"type": "COMBO",
"linkIds": [
12
],
"pos": [
-6601.620008222246,
1519.2619799128663
]
}
],
"outputs": [
{
"id": "16e7863c-4c38-46c2-aa74-e82991fbfe8d",
"name": "IMAGE",
"type": "IMAGE",
"linkIds": [
8
],
"localized_name": "IMAGE",
"pos": [
-6145.049695722246,
1499.2619799128663
]
},
{
"id": "f7240c19-5b80-406e-a8e2-9b12440ee2d6",
"name": "mask",
"type": "MASK",
"linkIds": [
11
],
"pos": [
-6145.049695722246,
1519.2619799128663
]
}
],
"widgets": [],
"nodes": [
{
"id": 13,
"type": "RemoveBackground",
"pos": [
-6536.764823982709,
1444.9963409012412
],
"size": [
302.25,
72
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [
{
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": 5
},
{
"localized_name": "bg_removal_model",
"name": "bg_removal_model",
"type": "BACKGROUND_REMOVAL",
"link": 3
}
],
"outputs": [
{
"localized_name": "mask",
"name": "mask",
"type": "MASK",
"links": [
4,
11
]
}
],
"properties": {
"Node name for S&R": "RemoveBackground"
}
},
{
"id": 14,
"type": "LoadBackgroundRemovalModel",
"pos": [
-6540.534070722246,
1302.223464635445
],
"size": [
311.484375,
85.515625
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"localized_name": "bg_removal_name",
"name": "bg_removal_name",
"type": "COMBO",
"widget": {
"name": "bg_removal_name"
},
"link": 12
}
],
"outputs": [
{
"localized_name": "bg_model",
"name": "bg_model",
"type": "BACKGROUND_REMOVAL",
"links": [
3
]
}
],
"properties": {
"Node name for S&R": "LoadBackgroundRemovalModel",
"models": [
{
"name": "birefnet.safetensors",
"url": "https://huggingface.co/Comfy-Org/BiRefNet/resolve/main/background_removal/birefnet.safetensors",
"directory": "background_removal"
}
]
},
"widgets_values": [
"birefnet.safetensors"
]
},
{
"id": 15,
"type": "InvertMask",
"pos": [
-6532.446160529669,
1571.1111286839914
],
"size": [
285.984375,
48
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "mask",
"name": "mask",
"type": "MASK",
"link": 4
}
],
"outputs": [
{
"localized_name": "MASK",
"name": "MASK",
"type": "MASK",
"links": [
6
]
}
],
"properties": {
"Node name for S&R": "InvertMask"
}
},
{
"id": 16,
"type": "JoinImageWithAlpha",
"pos": [
-6527.4370171636665,
1674.3004951902876
],
"size": [
284.96875,
72
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": 7
},
{
"localized_name": "alpha",
"name": "alpha",
"type": "MASK",
"link": 6
}
],
"outputs": [
{
"localized_name": "IMAGE",
"name": "IMAGE",
"type": "IMAGE",
"links": [
8
]
}
],
"properties": {
"Node name for S&R": "JoinImageWithAlpha"
}
}
],
"groups": [],
"links": [
{
"id": 3,
"origin_id": 14,
"origin_slot": 0,
"target_id": 13,
"target_slot": 1,
"type": "BACKGROUND_REMOVAL"
},
{
"id": 4,
"origin_id": 13,
"origin_slot": 0,
"target_id": 15,
"target_slot": 0,
"type": "MASK"
},
{
"id": 6,
"origin_id": 15,
"origin_slot": 0,
"target_id": 16,
"target_slot": 1,
"type": "MASK"
},
{
"id": 5,
"origin_id": -10,
"origin_slot": 0,
"target_id": 13,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 7,
"origin_id": -10,
"origin_slot": 0,
"target_id": 16,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 8,
"origin_id": 16,
"origin_slot": 0,
"target_id": -20,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 11,
"origin_id": 13,
"origin_slot": 0,
"target_id": -20,
"target_slot": 1,
"type": "MASK"
},
{
"id": 12,
"origin_id": -10,
"origin_slot": 1,
"target_id": 14,
"target_slot": 0,
"type": "COMBO"
}
],
"extra": {},
"category": "Image generation and editing/Background Removal"
}
]
},
"extra": {}
}

View File

@ -302,8 +302,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Sharpen"
"category": "Image Tools/Sharpen",
"description": "Sharpens image details using a GPU fragment shader for enhanced clarity."
}
]
}
}
}

View File

@ -222,7 +222,7 @@
},
"revision": 0,
"config": {},
"name": "local-Text to Audio (ACE-Step 1.5)",
"name": "Text to Audio (ACE-Step 1.5)",
"inputNode": {
"id": -10,
"bounding": [
@ -1502,7 +1502,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Audio/Music generation"
"category": "Audio/Music generation",
"description": "Generates audio/music from text prompts using ACE-Step 1.5, a diffusion-based audio generation model."
}
]
},
@ -1518,4 +1519,4 @@
}
},
"version": 0.4
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1029,7 +1029,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image"
"category": "Image generation and editing/Text to image",
"description": "Generates images from prompts using FLUX.1 [dev]: a 12B rectified-flow MMDiT with dual CLIP plus T5-XXL text encoders and guidance-distilled sampling for sharp prompt following versus classic DDPM diffusion."
}
]
},
@ -1043,4 +1044,4 @@
},
"ue_links": []
}
}
}

View File

@ -1023,7 +1023,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image"
"category": "Image generation and editing/Text to image",
"description": "FLUX.1 Krea [dev] (Black Forest Labs × Krea): open-weight 12B rectified-flow text-to-image drop-in alongside FLUX.1 [dev], tuned away from overcooked saturation toward more natural diversity in people, realism, and style while keeping ecosystem compatibility."
}
]
},
@ -1037,4 +1038,4 @@
},
"ue_links": []
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1104,7 +1104,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image"
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using NetaYume Lumina, fine-tuned from Neta Lumina for anime-style and illustration generation."
},
{
"id": "a07fdf06-1bda-4dac-bdbd-63ee8ebca1c9",
@ -1458,11 +1459,12 @@
],
"extra": {
"workflowRendererVersion": "LG"
}
},
"description": "Encodes a negative text prompt via CLIP for classifier-free guidance in anime-style generation (NetaYume Lumina)."
}
]
},
"extra": {
"ue_links": []
}
}
}

View File

@ -1941,7 +1941,8 @@
"extra": {
"workflowRendererVersion": "Vue-corrected"
},
"category": "Image generation and editing/Text to image"
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Qwen-Image-2512, with enhanced human realism and finer natural detail over the base version."
}
]
},

View File

@ -1873,7 +1873,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image"
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Qwen-Image, Alibaba's 20B MMDiT model with excellent multilingual text rendering."
}
]
},

File diff suppressed because it is too large Load Diff

View File

@ -1,22 +1,21 @@
{
"id": "1c3eaa76-5cfa-4dc7-8571-97a570324e01",
"revision": 0,
"last_node_id": 34,
"last_link_id": 40,
"last_node_id": 57,
"last_link_id": 0,
"nodes": [
{
"id": 5,
"type": "dfe9eb32-97c0-43a5-90d5-4fd37768d91b",
"id": 57,
"type": "f2fdebf6-dfaf-43b6-9eb2-7f70613cfdc1",
"pos": [
-2.5766491043910378e-05,
1229.999928629805
130,
200
],
"size": [
400,
470
],
"flags": {},
"order": 0,
"order": 1,
"mode": 0,
"inputs": [
{
@ -44,6 +43,22 @@
},
"link": null
},
{
"name": "seed",
"type": "INT",
"widget": {
"name": "seed"
},
"link": null
},
{
"name": "steps",
"type": "INT",
"widget": {
"name": "steps"
},
"link": null
},
{
"name": "unet_name",
"type": "COMBO",
@ -80,15 +95,15 @@
"properties": {
"proxyWidgets": [
[
"-1",
"27",
"text"
],
[
"-1",
"13",
"width"
],
[
"-1",
"13",
"height"
],
[
@ -97,19 +112,23 @@
],
[
"3",
"control_after_generate"
"steps"
],
[
"-1",
"28",
"unet_name"
],
[
"-1",
"30",
"clip_name"
],
[
"-1",
"29",
"vae_name"
],
[
"3",
"control_after_generate"
]
],
"cnr_id": "comfy-core",
@ -122,48 +141,40 @@
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
"",
1024,
1024,
null,
null,
"z_image_turbo_bf16.safetensors",
"qwen_3_4b.safetensors",
"ae.safetensors"
]
"widgets_values": [],
"title": "Text to Image (Z-Image-Turbo)"
}
],
"links": [],
"groups": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "dfe9eb32-97c0-43a5-90d5-4fd37768d91b",
"id": "f2fdebf6-dfaf-43b6-9eb2-7f70613cfdc1",
"version": 1,
"state": {
"lastGroupId": 4,
"lastNodeId": 34,
"lastLinkId": 40,
"lastNodeId": 61,
"lastLinkId": 75,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "local-Text to Image (Z-Image-Turbo)",
"name": "Text to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
-80,
425,
-560,
480,
120,
160
200
]
},
"outputNode": {
"id": -20,
"bounding": [
1490,
415,
1670,
320,
120,
60
]
@ -178,8 +189,8 @@
],
"label": "prompt",
"pos": [
20,
445
-460,
500
]
},
{
@ -190,8 +201,8 @@
35
],
"pos": [
20,
465
-460,
520
]
},
{
@ -202,44 +213,68 @@
36
],
"pos": [
20,
485
-460,
540
]
},
{
"id": "23087d15-8412-4fbd-b71e-9b6d7ef76de1",
"id": "f77677f7-6bf6-4c19-a71f-c4a553d5981e",
"name": "seed",
"type": "INT",
"linkIds": [
71
],
"pos": [
-460,
560
]
},
{
"id": "ef9a9fb1-5983-4bc9-a60b-cf5aec48bff1",
"name": "steps",
"type": "INT",
"linkIds": [
72
],
"pos": [
-460,
580
]
},
{
"id": "a20a1b30-785f-4a04-bb6d-3d61adab9764",
"name": "unet_name",
"type": "COMBO",
"linkIds": [
38
73
],
"pos": [
20,
505
-460,
600
]
},
{
"id": "0677f5c3-2a3f-43d4-98ac-a4c56d5efdc0",
"id": "4af8fc2b-4655-4086-8240-45f8cb38c6f6",
"name": "clip_name",
"type": "COMBO",
"linkIds": [
39
74
],
"pos": [
20,
525
-460,
620
]
},
{
"id": "c85c0445-2641-48b1-bbca-95057edf2fcf",
"id": "4d518693-2807-439c-9cb6-cffd23ccba2c",
"name": "vae_name",
"type": "COMBO",
"linkIds": [
40
75
],
"pos": [
20,
545
-460,
640
]
}
],
@ -253,8 +288,8 @@
],
"localized_name": "IMAGE",
"pos": [
1510,
435
1690,
340
]
}
],
@ -264,15 +299,15 @@
"id": 30,
"type": "CLIPLoader",
"pos": [
109.99997264844609,
329.99999029608756
30,
420
],
"size": [
269.9869791666667,
106
270,
150
],
"flags": {},
"order": 0,
"order": 7,
"mode": 0,
"inputs": [
{
@ -282,7 +317,7 @@
"widget": {
"name": "clip_name"
},
"link": 39
"link": 74
},
{
"localized_name": "type",
@ -315,9 +350,9 @@
}
],
"properties": {
"Node name for S&R": "CLIPLoader",
"cnr_id": "comfy-core",
"ver": "0.3.73",
"Node name for S&R": "CLIPLoader",
"models": [
{
"name": "qwen_3_4b.safetensors",
@ -343,15 +378,15 @@
"id": 29,
"type": "VAELoader",
"pos": [
109.99997264844609,
479.9999847172637
30,
650
],
"size": [
269.9869791666667,
58
270,
110
],
"flags": {},
"order": 1,
"order": 6,
"mode": 0,
"inputs": [
{
@ -361,7 +396,7 @@
"widget": {
"name": "vae_name"
},
"link": 40
"link": 75
}
],
"outputs": [
@ -375,9 +410,9 @@
}
],
"properties": {
"Node name for S&R": "VAELoader",
"cnr_id": "comfy-core",
"ver": "0.3.73",
"Node name for S&R": "VAELoader",
"models": [
{
"name": "ae.safetensors",
@ -401,12 +436,12 @@
"id": 33,
"type": "ConditioningZeroOut",
"pos": [
639.9999103333332,
620.0000271257795
630,
960
],
"size": [
204.134765625,
26
230,
80
],
"flags": {},
"order": 8,
@ -430,9 +465,9 @@
}
],
"properties": {
"Node name for S&R": "ConditioningZeroOut",
"cnr_id": "comfy-core",
"ver": "0.3.73",
"Node name for S&R": "ConditioningZeroOut",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -440,22 +475,21 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1219.9999088104782,
160.00009184959066
1320,
230
],
"size": [
209.98697916666669,
46
230,
100
],
"flags": {},
"order": 5,
"order": 1,
"mode": 0,
"inputs": [
{
@ -483,9 +517,9 @@
}
],
"properties": {
"Node name for S&R": "VAEDecode",
"cnr_id": "comfy-core",
"ver": "0.3.64",
"Node name for S&R": "VAEDecode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -493,22 +527,21 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 28,
"type": "UNETLoader",
"pos": [
109.99997264844609,
200.0000502647102
30,
230
],
"size": [
269.9869791666667,
82
270,
110
],
"flags": {},
"order": 2,
"order": 5,
"mode": 0,
"inputs": [
{
@ -518,7 +551,7 @@
"widget": {
"name": "unet_name"
},
"link": 38
"link": 73
},
{
"localized_name": "weight_dtype",
@ -541,9 +574,9 @@
}
],
"properties": {
"Node name for S&R": "UNETLoader",
"cnr_id": "comfy-core",
"ver": "0.3.73",
"Node name for S&R": "UNETLoader",
"models": [
{
"name": "z_image_turbo_bf16.safetensors",
@ -568,15 +601,15 @@
"id": 27,
"type": "CLIPTextEncode",
"pos": [
429.99997828947767,
200.0000502647102
400,
230
],
"size": [
409.9869791666667,
319.9869791666667
450,
650
],
"flags": {},
"order": 7,
"order": 4,
"mode": 0,
"inputs": [
{
@ -607,9 +640,9 @@
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode",
"cnr_id": "comfy-core",
"ver": "0.3.73",
"Node name for S&R": "CLIPTextEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -626,15 +659,15 @@
"id": 13,
"type": "EmptySD3LatentImage",
"pos": [
109.99997264844609,
629.9999791384399
40,
890
],
"size": [
259.9869791666667,
106
260,
170
],
"flags": {},
"order": 6,
"order": 3,
"mode": 0,
"inputs": [
{
@ -677,9 +710,9 @@
}
],
"properties": {
"Node name for S&R": "EmptySD3LatentImage",
"cnr_id": "comfy-core",
"ver": "0.3.64",
"Node name for S&R": "EmptySD3LatentImage",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -694,19 +727,77 @@
1
]
},
{
"id": 11,
"type": "ModelSamplingAuraFlow",
"pos": [
950,
230
],
"size": [
310,
110
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"localized_name": "model",
"name": "model",
"type": "MODEL",
"link": 26
},
{
"localized_name": "shift",
"name": "shift",
"type": "FLOAT",
"widget": {
"name": "shift"
},
"link": null
}
],
"outputs": [
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"slot_index": 0,
"links": [
13
]
}
],
"properties": {
"Node name for S&R": "ModelSamplingAuraFlow",
"cnr_id": "comfy-core",
"ver": "0.3.64",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
3
]
},
{
"id": 3,
"type": "KSampler",
"pos": [
879.9999615530063,
269.9999774911694
950,
400
],
"size": [
314.9869791666667,
262
320,
350
],
"flags": {},
"order": 4,
"order": 0,
"mode": 0,
"inputs": [
{
@ -740,7 +831,7 @@
"widget": {
"name": "seed"
},
"link": null
"link": 71
},
{
"localized_name": "steps",
@ -749,7 +840,7 @@
"widget": {
"name": "steps"
},
"link": null
"link": 72
},
{
"localized_name": "cfg",
@ -800,9 +891,9 @@
}
],
"properties": {
"Node name for S&R": "KSampler",
"cnr_id": "comfy-core",
"ver": "0.3.64",
"Node name for S&R": "KSampler",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -814,81 +905,23 @@
"widgets_values": [
0,
"randomize",
4,
8,
1,
"res_multistep",
"simple",
1
]
},
{
"id": 11,
"type": "ModelSamplingAuraFlow",
"pos": [
879.9999615530063,
160.00009184959066
],
"size": [
309.9869791666667,
58
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"localized_name": "model",
"name": "model",
"type": "MODEL",
"link": 26
},
{
"localized_name": "shift",
"name": "shift",
"type": "FLOAT",
"widget": {
"name": "shift"
},
"link": null
}
],
"outputs": [
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"slot_index": 0,
"links": [
13
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.3.64",
"Node name for S&R": "ModelSamplingAuraFlow",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
3
]
}
],
"groups": [
{
"id": 2,
"title": "Image size",
"title": "Step2 - Image size",
"bounding": [
100,
560,
290,
200
10,
820,
320,
280
],
"color": "#3f789e",
"font_size": 24,
@ -896,12 +929,12 @@
},
{
"id": 3,
"title": "Prompt",
"title": "Step3 - Prompt",
"bounding": [
410,
360,
130,
450,
540
530,
970
],
"color": "#3f789e",
"font_size": 24,
@ -909,12 +942,12 @@
},
{
"id": 4,
"title": "Models",
"title": "Step1 - Load models",
"bounding": [
100,
0,
130,
290,
413.6
330,
660
],
"color": "#3f789e",
"font_size": 24,
@ -1027,25 +1060,41 @@
"type": "INT"
},
{
"id": 38,
"id": 71,
"origin_id": -10,
"origin_slot": 3,
"target_id": 3,
"target_slot": 4,
"type": "INT"
},
{
"id": 72,
"origin_id": -10,
"origin_slot": 4,
"target_id": 3,
"target_slot": 5,
"type": "INT"
},
{
"id": 73,
"origin_id": -10,
"origin_slot": 5,
"target_id": 28,
"target_slot": 0,
"type": "COMBO"
},
{
"id": 39,
"id": 74,
"origin_id": -10,
"origin_slot": 4,
"origin_slot": 6,
"target_id": 30,
"target_slot": 0,
"type": "COMBO"
},
{
"id": 40,
"id": 75,
"origin_id": -10,
"origin_slot": 5,
"origin_slot": 7,
"target_id": 29,
"target_slot": 0,
"type": "COMBO"
@ -1054,25 +1103,10 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image generation and editing/Text to image"
"category": "Image generation and editing/Text to image",
"description": "Generates images from text prompts using Z-Image-Turbo, Alibaba's distilled 6B DiT model."
}
]
},
"config": {},
"extra": {
"frontendVersion": "1.37.10",
"workflowRendererVersion": "LG",
"VHS_latentpreview": false,
"VHS_latentpreviewrate": 0,
"VHS_MetadataImage": true,
"VHS_KeepIntermediate": true,
"ds": {
"scale": 0.8401370345180755,
"offset": [
940.0587067393087,
-830.7121087564725
]
}
},
"version": 0.4
}
"extra": {}
}

File diff suppressed because it is too large Load Diff

View File

@ -4286,7 +4286,8 @@
"extra": {
"workflowRendererVersion": "Vue-corrected"
},
"category": "Video generation and editing/Text to video"
"category": "Video generation and editing/Text to video",
"description": "Generates video from text prompts using LTX-2.3, Lightricks' video diffusion model."
}
]
},

View File

@ -1572,7 +1572,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Text to video"
"category": "Video generation and editing/Text to video",
"description": "Generates video from text prompts using Wan 2.2, Alibaba's diffusion video model."
}
]
},
@ -1586,4 +1587,4 @@
"VHS_KeepIntermediate": true
},
"version": 0.4
}
}

View File

@ -434,8 +434,9 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Image Tools/Sharpen"
"category": "Image Tools/Sharpen",
"description": "Enhances edge contrast via unsharp masking for a sharper image appearance."
}
]
}
}
}

View File

@ -307,7 +307,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Text generation/Video Captioning"
"category": "Text generation/Video Captioning",
"description": "Generates descriptive captions for video input using Google's Gemini multimodal LLM."
}
]
}

View File

@ -165,7 +165,7 @@
},
"revision": 0,
"config": {},
"name": "local-Video Inpaint(Wan2.1 VACE)",
"name": "Video Inpaint (Wan 2.1 VACE)",
"inputNode": {
"id": -10,
"bounding": [
@ -2368,7 +2368,8 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Inpaint video"
"category": "Video generation and editing/Inpaint video",
"description": "Inpaints masked regions in video frames using Wan 2.1 VACE."
}
]
},

View File

@ -0,0 +1,827 @@
{
"revision": 0,
"last_node_id": 130,
"last_link_id": 0,
"nodes": [
{
"id": 130,
"type": "7937cf78-b52b-40a3-93b2-b4e2e5f98df1",
"pos": [
-1210,
-2780
],
"size": [
300,
370
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "video",
"type": "VIDEO",
"link": null
},
{
"name": "text",
"type": "STRING",
"widget": {
"name": "text"
},
"link": null
},
{
"name": "bboxes",
"type": "BOUNDING_BOX",
"link": null
},
{
"name": "positive_coords",
"type": "STRING",
"link": null
},
{
"name": "negative_coords",
"type": "STRING",
"link": null
},
{
"name": "threshold",
"type": "FLOAT",
"widget": {
"name": "threshold"
},
"link": null
},
{
"name": "refine_iterations",
"type": "INT",
"widget": {
"name": "refine_iterations"
},
"link": null
},
{
"name": "individual_masks",
"type": "BOOLEAN",
"widget": {
"name": "individual_masks"
},
"link": null
},
{
"name": "ckpt_name",
"type": "COMBO",
"widget": {
"name": "ckpt_name"
},
"link": null
}
],
"outputs": [
{
"localized_name": "masks",
"name": "masks",
"type": "MASK",
"links": []
},
{
"localized_name": "bboxes",
"name": "bboxes",
"type": "BOUNDING_BOX",
"links": []
},
{
"name": "audio",
"type": "AUDIO",
"links": null
},
{
"name": "fps",
"type": "FLOAT",
"links": null
}
],
"properties": {
"proxyWidgets": [
[
"125",
"text"
],
[
"126",
"threshold"
],
[
"126",
"refine_iterations"
],
[
"126",
"individual_masks"
],
[
"127",
"ckpt_name"
]
],
"cnr_id": "comfy-core",
"ver": "0.19.3",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [],
"title": "Video Segmentation (SAM3)"
}
],
"links": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "7937cf78-b52b-40a3-93b2-b4e2e5f98df1",
"version": 1,
"state": {
"lastGroupId": 0,
"lastNodeId": 130,
"lastLinkId": 299,
"lastRerouteId": 0
},
"revision": 0,
"config": {},
"name": "Video Segmentation (SAM3)",
"inputNode": {
"id": -10,
"bounding": [
-2260,
-3450,
136.369140625,
220
]
},
"outputNode": {
"id": -20,
"bounding": [
-1050,
-3510,
120,
120
]
},
"inputs": [
{
"id": "680ffd88-32fe-48be-88d6-91ea44d5eaee",
"name": "video",
"type": "VIDEO",
"linkIds": [
252
],
"pos": [
-2143.630859375,
-3430
]
},
{
"id": "ceaf249c-32d7-4624-8bf6-e590e347ed90",
"name": "text",
"type": "STRING",
"linkIds": [
254
],
"pos": [
-2143.630859375,
-3410
]
},
{
"id": "1ffbff36-da0c-4854-8cb4-88ad31e64f99",
"name": "bboxes",
"type": "BOUNDING_BOX",
"linkIds": [
255
],
"pos": [
-2143.630859375,
-3390
]
},
{
"id": "67b7f4c7-cec0-4e00-b154-23cc1abf880e",
"name": "positive_coords",
"type": "STRING",
"linkIds": [
256
],
"pos": [
-2143.630859375,
-3370
]
},
{
"id": "b090a498-2bde-46b9-9554-18501401d687",
"name": "negative_coords",
"type": "STRING",
"linkIds": [
257
],
"pos": [
-2143.630859375,
-3350
]
},
{
"id": "1a76dfcf-ce95-46af-bba5-c42160c683dd",
"name": "threshold",
"type": "FLOAT",
"linkIds": [
261
],
"pos": [
-2143.630859375,
-3330
]
},
{
"id": "999523fa-c476-4c53-80c3-0a2f554d18ab",
"name": "refine_iterations",
"type": "INT",
"linkIds": [
262
],
"pos": [
-2143.630859375,
-3310
]
},
{
"id": "d2371011-7fe5-4a39-b0c1-df2e0bbd6ece",
"name": "individual_masks",
"type": "BOOLEAN",
"linkIds": [
263
],
"pos": [
-2143.630859375,
-3290
]
},
{
"id": "675a8b37-17db-48d1-853c-2fe5d6a74582",
"name": "ckpt_name",
"type": "COMBO",
"linkIds": [
273
],
"pos": [
-2143.630859375,
-3270
]
}
],
"outputs": [
{
"id": "ff50da09-1e59-4a58-9b7f-be1a00aa5913",
"name": "masks",
"type": "MASK",
"linkIds": [
231
],
"localized_name": "masks",
"pos": [
-1030,
-3490
]
},
{
"id": "8f622e40-8528-4078-b7d3-147e9f872194",
"name": "bboxes",
"type": "BOUNDING_BOX",
"linkIds": [
232
],
"localized_name": "bboxes",
"pos": [
-1030,
-3470
]
},
{
"id": "6c9924ec-f0fa-4509-83ea-8f97f5889bcc",
"name": "audio",
"type": "AUDIO",
"linkIds": [
259
],
"pos": [
-1030,
-3450
]
},
{
"id": "82c1cddc-ab11-44eb-9e2f-1a5c7ea5645b",
"name": "fps",
"type": "FLOAT",
"linkIds": [
260
],
"pos": [
-1030,
-3430
]
}
],
"widgets": [],
"nodes": [
{
"id": 125,
"type": "CLIPTextEncode",
"pos": [
-2010,
-3040
],
"size": [
400,
200
],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"localized_name": "clip",
"name": "clip",
"type": "CLIP",
"link": 240
},
{
"localized_name": "text",
"name": "text",
"type": "STRING",
"widget": {
"name": "text"
},
"link": 254
}
],
"outputs": [
{
"localized_name": "CONDITIONING",
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
200
]
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode",
"cnr_id": "comfy-core",
"ver": "0.19.3",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
""
]
},
{
"id": 126,
"type": "SAM3_Detect",
"pos": [
-1520,
-3520
],
"size": [
270,
290
],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"label": "model",
"localized_name": "model",
"name": "model",
"type": "MODEL",
"link": 237
},
{
"label": "image",
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": 253
},
{
"label": "conditioning",
"localized_name": "conditioning",
"name": "conditioning",
"shape": 7,
"type": "CONDITIONING",
"link": 200
},
{
"label": "bboxes",
"localized_name": "bboxes",
"name": "bboxes",
"shape": 7,
"type": "BOUNDING_BOX",
"link": 255
},
{
"label": "positive_coords",
"localized_name": "positive_coords",
"name": "positive_coords",
"shape": 7,
"type": "STRING",
"link": 256
},
{
"label": "negative_coords",
"localized_name": "negative_coords",
"name": "negative_coords",
"shape": 7,
"type": "STRING",
"link": 257
},
{
"localized_name": "threshold",
"name": "threshold",
"type": "FLOAT",
"widget": {
"name": "threshold"
},
"link": 261
},
{
"localized_name": "refine_iterations",
"name": "refine_iterations",
"type": "INT",
"widget": {
"name": "refine_iterations"
},
"link": 262
},
{
"localized_name": "individual_masks",
"name": "individual_masks",
"type": "BOOLEAN",
"widget": {
"name": "individual_masks"
},
"link": 263
}
],
"outputs": [
{
"localized_name": "masks",
"name": "masks",
"type": "MASK",
"links": [
231
]
},
{
"localized_name": "bboxes",
"name": "bboxes",
"type": "BOUNDING_BOX",
"links": [
232
]
}
],
"properties": {
"Node name for S&R": "SAM3_Detect",
"cnr_id": "comfy-core",
"ver": "0.19.3",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
0.5,
2,
false
]
},
{
"id": 127,
"type": "CheckpointLoaderSimple",
"pos": [
-1970,
-3310
],
"size": [
330,
160
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"localized_name": "ckpt_name",
"name": "ckpt_name",
"type": "COMBO",
"widget": {
"name": "ckpt_name"
},
"link": 273
}
],
"outputs": [
{
"localized_name": "MODEL",
"name": "MODEL",
"type": "MODEL",
"links": [
237
]
},
{
"localized_name": "CLIP",
"name": "CLIP",
"type": "CLIP",
"links": [
240
]
},
{
"localized_name": "VAE",
"name": "VAE",
"type": "VAE",
"links": null
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple",
"cnr_id": "comfy-core",
"ver": "0.19.3",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65,
"models": [
{
"name": "sam3.1_multiplex_fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/sam3.1/resolve/main/checkpoints/sam3.1_multiplex_fp16.safetensors",
"directory": "checkpoints"
}
]
},
"widgets_values": [
"sam3.1_multiplex_fp16.safetensors"
]
},
{
"id": 128,
"type": "GetVideoComponents",
"pos": [
-1910,
-3540
],
"size": [
230,
120
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"localized_name": "video",
"name": "video",
"type": "VIDEO",
"link": 252
}
],
"outputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"links": [
253
]
},
{
"localized_name": "audio",
"name": "audio",
"type": "AUDIO",
"links": [
259
]
},
{
"localized_name": "fps",
"name": "fps",
"type": "FLOAT",
"links": [
260
]
}
],
"properties": {
"Node name for S&R": "GetVideoComponents",
"cnr_id": "comfy-core",
"ver": "0.19.3",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
"hasSecondTab": false,
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
}
},
{
"id": 129,
"type": "Note",
"pos": [
-1980,
-2790
],
"size": [
370,
250
],
"flags": {},
"order": 0,
"mode": 0,
"inputs": [],
"outputs": [],
"title": "Note: Prompt format",
"properties": {},
"widgets_values": [
"Max tokens for this model is only 32, to separately prompt multiple subjects you can separate prompts with comma, and set the max amount of objects detected for each prompt with :N\n\nFor example above test prompt finds 2 cakes, one apron, 4 window panels"
],
"color": "#432",
"bgcolor": "#653"
}
],
"groups": [],
"links": [
{
"id": 237,
"origin_id": 127,
"origin_slot": 0,
"target_id": 126,
"target_slot": 0,
"type": "MODEL"
},
{
"id": 200,
"origin_id": 125,
"origin_slot": 0,
"target_id": 126,
"target_slot": 2,
"type": "CONDITIONING"
},
{
"id": 240,
"origin_id": 127,
"origin_slot": 1,
"target_id": 125,
"target_slot": 0,
"type": "CLIP"
},
{
"id": 231,
"origin_id": 126,
"origin_slot": 0,
"target_id": -20,
"target_slot": 0,
"type": "MASK"
},
{
"id": 232,
"origin_id": 126,
"origin_slot": 1,
"target_id": -20,
"target_slot": 1,
"type": "BOUNDING_BOX"
},
{
"id": 252,
"origin_id": -10,
"origin_slot": 0,
"target_id": 128,
"target_slot": 0,
"type": "VIDEO"
},
{
"id": 253,
"origin_id": 128,
"origin_slot": 0,
"target_id": 126,
"target_slot": 1,
"type": "IMAGE"
},
{
"id": 254,
"origin_id": -10,
"origin_slot": 1,
"target_id": 125,
"target_slot": 1,
"type": "STRING"
},
{
"id": 255,
"origin_id": -10,
"origin_slot": 2,
"target_id": 126,
"target_slot": 3,
"type": "BOUNDING_BOX"
},
{
"id": 256,
"origin_id": -10,
"origin_slot": 3,
"target_id": 126,
"target_slot": 4,
"type": "STRING"
},
{
"id": 257,
"origin_id": -10,
"origin_slot": 4,
"target_id": 126,
"target_slot": 5,
"type": "STRING"
},
{
"id": 259,
"origin_id": 128,
"origin_slot": 1,
"target_id": -20,
"target_slot": 2,
"type": "AUDIO"
},
{
"id": 260,
"origin_id": 128,
"origin_slot": 2,
"target_id": -20,
"target_slot": 3,
"type": "FLOAT"
},
{
"id": 261,
"origin_id": -10,
"origin_slot": 5,
"target_id": 126,
"target_slot": 6,
"type": "FLOAT"
},
{
"id": 262,
"origin_id": -10,
"origin_slot": 6,
"target_id": 126,
"target_slot": 7,
"type": "INT"
},
{
"id": 263,
"origin_id": -10,
"origin_slot": 7,
"target_id": 126,
"target_slot": 8,
"type": "BOOLEAN"
},
{
"id": 273,
"origin_id": -10,
"origin_slot": 8,
"target_id": 127,
"target_slot": 0,
"type": "COMBO"
}
],
"extra": {},
"category": "Video Tools",
"description": "Segments video into temporally consistent masks using Meta SAM3 from text or interactive prompts."
}
]
},
"extra": {}
}

View File

@ -1,21 +1,21 @@
{
"revision": 0,
"last_node_id": 84,
"last_node_id": 85,
"last_link_id": 0,
"nodes": [
{
"id": 84,
"type": "8e8aa94a-647e-436d-8440-8ee4691864de",
"id": 85,
"type": "637913e7-0206-46ba-8ded-70ae3a7c2e19",
"pos": [
-6100,
2620
-880,
-2260
],
"size": [
290,
160
],
"flags": {},
"order": 0,
"order": 2,
"mode": 0,
"inputs": [
{
@ -76,31 +76,26 @@
"properties": {
"proxyWidgets": [
[
"-1",
"79",
"direction"
],
[
"-1",
"79",
"match_image_size"
],
[
"-1",
"79",
"spacing_width"
],
[
"-1",
"79",
"spacing_color"
]
],
"cnr_id": "comfy-core",
"ver": "0.13.0"
},
"widgets_values": [
"right",
true,
0,
"white"
],
"widgets_values": [],
"title": "Video Stitch"
}
],
@ -109,12 +104,12 @@
"definitions": {
"subgraphs": [
{
"id": "8e8aa94a-647e-436d-8440-8ee4691864de",
"id": "637913e7-0206-46ba-8ded-70ae3a7c2e19",
"version": 1,
"state": {
"lastGroupId": 1,
"lastNodeId": 84,
"lastLinkId": 262,
"lastNodeId": 97,
"lastLinkId": 282,
"lastRerouteId": 0
},
"revision": 0,
@ -123,8 +118,8 @@
"inputNode": {
"id": -10,
"bounding": [
-6580,
2649,
-6810,
2580,
143.55859375,
160
]
@ -132,8 +127,8 @@
"outputNode": {
"id": -20,
"bounding": [
-5720,
2659,
-4770,
2600,
120,
60
]
@ -149,8 +144,8 @@
"localized_name": "video",
"label": "Before Video",
"pos": [
-6456.44140625,
2669
-6686.44140625,
2600
]
},
{
@ -163,8 +158,8 @@
"localized_name": "video_1",
"label": "After Video",
"pos": [
-6456.44140625,
2689
-6686.44140625,
2620
]
},
{
@ -175,8 +170,8 @@
259
],
"pos": [
-6456.44140625,
2709
-6686.44140625,
2640
]
},
{
@ -187,8 +182,8 @@
260
],
"pos": [
-6456.44140625,
2729
-6686.44140625,
2660
]
},
{
@ -199,8 +194,8 @@
261
],
"pos": [
-6456.44140625,
2749
-6686.44140625,
2680
]
},
{
@ -211,8 +206,8 @@
262
],
"pos": [
-6456.44140625,
2769
-6686.44140625,
2700
]
}
],
@ -226,8 +221,8 @@
],
"localized_name": "VIDEO",
"pos": [
-5700,
2679
-4750,
2620
]
}
],
@ -238,11 +233,11 @@
"type": "GetVideoComponents",
"pos": [
-6390,
2560
2600
],
"size": [
193.530859375,
66
230,
120
],
"flags": {},
"order": 1,
@ -278,9 +273,9 @@
}
],
"properties": {
"Node name for S&R": "GetVideoComponents",
"cnr_id": "comfy-core",
"ver": "0.13.0",
"Node name for S&R": "GetVideoComponents"
"ver": "0.13.0"
}
},
{
@ -291,8 +286,8 @@
2420
],
"size": [
193.530859375,
66
230,
120
],
"flags": {},
"order": 0,
@ -332,21 +327,254 @@
}
],
"properties": {
"Node name for S&R": "GetVideoComponents",
"cnr_id": "comfy-core",
"ver": "0.13.0",
"Node name for S&R": "GetVideoComponents"
"ver": "0.13.0"
}
},
{
"id": 90,
"type": "GetImageSize",
"pos": [
-6390,
3030
],
"size": [
230,
120
],
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"localized_name": "image",
"name": "image",
"type": "IMAGE",
"link": 266
}
],
"outputs": [
{
"localized_name": "width",
"name": "width",
"type": "INT",
"links": [
274
]
},
{
"localized_name": "height",
"name": "height",
"type": "INT",
"links": [
276
]
},
{
"localized_name": "batch_size",
"name": "batch_size",
"type": "INT",
"links": null
}
],
"properties": {
"Node name for S&R": "GetImageSize"
}
},
{
"id": 80,
"type": "CreateVideo",
"pos": [
-5190,
2420
],
"size": [
270,
130
],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"link": 282
},
{
"localized_name": "audio",
"name": "audio",
"shape": 7,
"type": "AUDIO",
"link": 251
},
{
"localized_name": "fps",
"name": "fps",
"type": "FLOAT",
"widget": {
"name": "fps"
},
"link": 252
}
],
"outputs": [
{
"localized_name": "VIDEO",
"name": "VIDEO",
"type": "VIDEO",
"links": [
255
]
}
],
"properties": {
"Node name for S&R": "CreateVideo",
"cnr_id": "comfy-core",
"ver": "0.13.0"
},
"widgets_values": [
30
]
},
{
"id": 95,
"type": "ComfyMathExpression",
"pos": [
-6040,
3020
],
"size": [
400,
200
],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"label": "a",
"localized_name": "values.a",
"name": "values.a",
"type": "FLOAT,INT",
"link": 274
},
{
"label": "b",
"localized_name": "values.b",
"name": "values.b",
"shape": 7,
"type": "FLOAT,INT",
"link": null
},
{
"localized_name": "expression",
"name": "expression",
"type": "STRING",
"widget": {
"name": "expression"
},
"link": null
}
],
"outputs": [
{
"localized_name": "FLOAT",
"name": "FLOAT",
"type": "FLOAT",
"links": null
},
{
"localized_name": "INT",
"name": "INT",
"type": "INT",
"links": [
279
]
}
],
"properties": {
"Node name for S&R": "ComfyMathExpression"
},
"widgets_values": [
"a & ~1"
]
},
{
"id": 96,
"type": "ComfyMathExpression",
"pos": [
-6040,
3290
],
"size": [
400,
200
],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"label": "a",
"localized_name": "values.a",
"name": "values.a",
"type": "FLOAT,INT",
"link": 276
},
{
"label": "b",
"localized_name": "values.b",
"name": "values.b",
"shape": 7,
"type": "FLOAT,INT",
"link": null
},
{
"localized_name": "expression",
"name": "expression",
"type": "STRING",
"widget": {
"name": "expression"
},
"link": null
}
],
"outputs": [
{
"localized_name": "FLOAT",
"name": "FLOAT",
"type": "FLOAT",
"links": null
},
{
"localized_name": "INT",
"name": "INT",
"type": "INT",
"links": [
280
]
}
],
"properties": {
"Node name for S&R": "ComfyMathExpression"
},
"widgets_values": [
"a & ~1"
]
},
{
"id": 79,
"type": "ImageStitch",
"pos": [
-6390,
2700
2780
],
"size": [
270,
150
160
],
"flags": {},
"order": 2,
@ -408,14 +636,15 @@
"name": "IMAGE",
"type": "IMAGE",
"links": [
250
266,
281
]
}
],
"properties": {
"Node name for S&R": "ImageStitch",
"cnr_id": "comfy-core",
"ver": "0.13.0",
"Node name for S&R": "ImageStitch"
"ver": "0.13.0"
},
"widgets_values": [
"right",
@ -425,60 +654,91 @@
]
},
{
"id": 80,
"type": "CreateVideo",
"id": 97,
"type": "ResizeImageMaskNode",
"pos": [
-6040,
2610
-5560,
2790
],
"size": [
270,
78
160
],
"flags": {},
"order": 3,
"order": 7,
"mode": 0,
"inputs": [
{
"localized_name": "images",
"name": "images",
"type": "IMAGE",
"link": 250
"localized_name": "input",
"name": "input",
"type": "IMAGE,MASK",
"link": 281
},
{
"localized_name": "audio",
"name": "audio",
"shape": 7,
"type": "AUDIO",
"link": 251
},
{
"localized_name": "fps",
"name": "fps",
"type": "FLOAT",
"localized_name": "resize_type",
"name": "resize_type",
"type": "COMFY_DYNAMICCOMBO_V3",
"widget": {
"name": "fps"
"name": "resize_type"
},
"link": 252
"link": null
},
{
"localized_name": "width",
"name": "resize_type.width",
"type": "INT",
"widget": {
"name": "resize_type.width"
},
"link": 279
},
{
"localized_name": "height",
"name": "resize_type.height",
"type": "INT",
"widget": {
"name": "resize_type.height"
},
"link": 280
},
{
"localized_name": "crop",
"name": "resize_type.crop",
"type": "COMBO",
"widget": {
"name": "resize_type.crop"
},
"link": null
},
{
"localized_name": "scale_method",
"name": "scale_method",
"type": "COMBO",
"widget": {
"name": "scale_method"
},
"link": null
}
],
"outputs": [
{
"localized_name": "VIDEO",
"name": "VIDEO",
"type": "VIDEO",
"localized_name": "resized",
"name": "resized",
"type": "*",
"links": [
255
282
]
}
],
"properties": {
"cnr_id": "comfy-core",
"ver": "0.13.0",
"Node name for S&R": "CreateVideo"
"Node name for S&R": "ResizeImageMaskNode"
},
"widgets_values": [
30
"scale dimensions",
512,
512,
"center",
"area"
]
}
],
@ -500,14 +760,6 @@
"target_slot": 1,
"type": "IMAGE"
},
{
"id": 250,
"origin_id": 79,
"origin_slot": 0,
"target_id": 80,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 251,
"origin_id": 77,
@ -579,13 +831,71 @@
"target_id": 79,
"target_slot": 5,
"type": "COMBO"
},
{
"id": 266,
"origin_id": 79,
"origin_slot": 0,
"target_id": 90,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 274,
"origin_id": 90,
"origin_slot": 0,
"target_id": 95,
"target_slot": 0,
"type": "INT"
},
{
"id": 276,
"origin_id": 90,
"origin_slot": 1,
"target_id": 96,
"target_slot": 0,
"type": "INT"
},
{
"id": 279,
"origin_id": 95,
"origin_slot": 1,
"target_id": 97,
"target_slot": 2,
"type": "INT"
},
{
"id": 280,
"origin_id": 96,
"origin_slot": 1,
"target_id": 97,
"target_slot": 3,
"type": "INT"
},
{
"id": 281,
"origin_id": 79,
"origin_slot": 0,
"target_id": 97,
"target_slot": 0,
"type": "IMAGE"
},
{
"id": 282,
"origin_id": 97,
"origin_slot": 0,
"target_id": 80,
"target_slot": 0,
"type": "IMAGE"
}
],
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video Tools/Stitch videos"
"category": "Video Tools/Stitch videos",
"description": "Stitches two videos side by side into a single comparison video."
}
]
}
}
},
"extra": {}
}

View File

@ -412,9 +412,10 @@
"extra": {
"workflowRendererVersion": "LG"
},
"category": "Video generation and editing/Enhance video"
"category": "Video generation and editing/Enhance video",
"description": "Upscales video to 4× resolution using a GAN-based upscaling model."
}
]
},
"extra": {}
}
}

View File

@ -1135,7 +1135,7 @@ class AudioInjector_WAN(nn.Module):
self.injector_adain_output_layers = nn.ModuleList(
[operations.Linear(dim, dim, dtype=dtype, device=device) for _ in range(audio_injector_id)])
def forward(self, x, block_id, audio_emb, audio_emb_global, seq_len):
def forward(self, x, block_id, audio_emb, audio_emb_global, seq_len, scale=1.0):
audio_attn_id = self.injected_block_id.get(block_id, None)
if audio_attn_id is None:
return x
@ -1148,12 +1148,15 @@ class AudioInjector_WAN(nn.Module):
attn_hidden_states = adain_hidden_states
else:
attn_hidden_states = self.injector_pre_norm_feat[audio_attn_id](input_hidden_states)
audio_emb = rearrange(audio_emb, "b t n c -> (b t) n c", t=num_frames)
attn_audio_emb = audio_emb
if audio_emb.dim() == 3: # WanDancer case
attn_audio_emb = rearrange(audio_emb, "b t c -> (b t) 1 c", t=num_frames)
else: # S2V case
attn_audio_emb = rearrange(audio_emb, "b t n c -> (b t) n c", t=num_frames)
residual_out = self.injector[audio_attn_id](x=attn_hidden_states, context=attn_audio_emb)
residual_out = rearrange(
residual_out, "(b t) n c -> b (t n) c", t=num_frames)
x[:, :seq_len] = x[:, :seq_len] + residual_out
residual_out = rearrange(residual_out, "(b t) n c -> b (t n) c", t=num_frames)
x[:, :seq_len] = x[:, :seq_len] + residual_out * scale
return x

View File

@ -0,0 +1,251 @@
import torch
import torch.nn as nn
import comfy
from comfy.ldm.modules.attention import optimized_attention
from comfy.ldm.flux.math import apply_rope1
from comfy.ldm.flux.layers import EmbedND
from .model import AudioInjector_WAN, WanModel, MLPProj, Head, sinusoidal_embedding_1d
class MusicSelfAttention(nn.Module):
    """Multi-head self-attention over a sequence of music-feature tokens.

    Queries and keys are rotated with RoPE (``apply_rope1``) before attention.
    Q/K/V/out are kept as four separate ``Linear`` layers (the checkpoint's
    fused ``in_proj`` tensor is split into these at load time).
    """

    def __init__(self, dim, num_heads, device=None, dtype=None, operations=None):
        assert dim % num_heads == 0
        super().__init__()
        self.embed_dim = dim
        self.num_heads = num_heads
        # width of each attention head
        self.head_dim = dim // num_heads
        self.q_proj = operations.Linear(dim, dim, device=device, dtype=dtype)
        self.k_proj = operations.Linear(dim, dim, device=device, dtype=dtype)
        self.v_proj = operations.Linear(dim, dim, device=device, dtype=dtype)
        self.out_proj = operations.Linear(dim, dim, device=device, dtype=dtype)

    def forward(self, x, freqs):
        batch, seq = x.shape[:2]
        heads = self.num_heads
        width = self.head_dim
        # Project, split into heads, and apply rotary position encoding to Q and K.
        q = apply_rope1(self.q_proj(x).view(batch, seq, heads, width), freqs)
        k = apply_rope1(self.k_proj(x).view(batch, seq, heads, width), freqs)
        v = self.v_proj(x).view(batch, seq, heads * width)
        # optimized_attention expects flattened (b, s, heads*width) tensors.
        attended = optimized_attention(
            q.view(batch, seq, heads * width),
            k.view(batch, seq, heads * width),
            v,
            heads=self.num_heads,
        )
        return self.out_proj(attended)
class MusicEncoderLayer(nn.Module):
    """Pre-norm transformer encoder layer: RoPE self-attention + GELU feed-forward.

    Both sub-layers are residual; normalization is applied to the sub-layer
    input (pre-norm), matching the music encoder checkpoint layout.
    """

    def __init__(self, dim: int, num_heads: int, ffn_dim: int, device=None, dtype=None, operations=None):
        super().__init__()
        self.self_attn = MusicSelfAttention(dim, num_heads, device=device, dtype=dtype, operations=operations)
        self.linear1 = operations.Linear(dim, ffn_dim, device=device, dtype=dtype)
        self.linear2 = operations.Linear(ffn_dim, dim, device=device, dtype=dtype)
        self.norm1 = operations.LayerNorm(dim, device=device, dtype=dtype)
        self.norm2 = operations.LayerNorm(dim, device=device, dtype=dtype)

    def forward(self, x: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
        # Residual self-attention sub-layer.
        attn_out = self.self_attn(self.norm1(x), freqs=freqs)
        x = x + attn_out
        # Residual feed-forward sub-layer with GELU activation.
        hidden = torch.nn.functional.gelu(self.linear1(self.norm2(x)))
        return x + self.linear2(hidden)
class WanDancerModel(WanModel):
    """WanDancer: Wan i2v video diffusion model conditioned on a music track.

    Extends WanModel with:
      * a second patch embedding / output head pair (``patch_embedding_global`` /
        ``head_global``) selected whenever the requested fps is not 30,
      * a small two-layer transformer (``music_encoder``) that encodes per-frame
        music features and injects them into selected DiT blocks through
        ``music_injector`` (an :class:`AudioInjector_WAN` without AdaIN),
      * an extra CLIP-vision projection (``img_emb_refimage``) for a reference image.

    Fixes over the original implementation: the mutable default arguments
    (``audio_inject_layers`` list, ``transformer_options`` dicts) are replaced —
    ``forward_orig`` *mutates* ``transformer_options``, so a shared ``{}`` default
    would leak state between calls.
    """

    def __init__(self,
                 model_type='wandancer',  # accepted from the detected config; the base model is always built as 'i2v'
                 patch_size=(1, 2, 2),
                 text_len=512,
                 in_dim=16,
                 dim=5120,
                 ffn_dim=8192,
                 freq_dim=256,
                 text_dim=4096,
                 out_dim=16,
                 num_heads=16,
                 num_layers=40,
                 window_size=(-1, -1),
                 qk_norm=True,
                 cross_attn_norm=True,
                 eps=1e-6,
                 in_dim_ref_conv=None,
                 image_model=None,
                 device=None, dtype=None, operations=None,
                 audio_inject_layers=(0, 4, 8, 12, 16, 20, 24, 27),  # tuple: immutable default (was a shared list)
                 music_dim=256,
                 music_heads=4,
                 music_feature_dim=35,
                 music_latent_dim=256
                 ):
        super().__init__(model_type='i2v', patch_size=patch_size, text_len=text_len, in_dim=in_dim, dim=dim, ffn_dim=ffn_dim, freq_dim=freq_dim, text_dim=text_dim, out_dim=out_dim,
                         num_heads=num_heads, num_layers=num_layers, window_size=window_size, qk_norm=qk_norm, cross_attn_norm=cross_attn_norm, eps=eps, image_model=image_model, in_dim_ref_conv=in_dim_ref_conv,
                         device=device, dtype=dtype, operations=operations)
        self.dtype = dtype
        operation_settings = {"operations": operations, "device": device, "dtype": dtype}

        # Alternate embedding/head pair used for non-30fps generation.
        # NOTE(review): patch_embedding_global is kept in float32 on purpose
        # (inputs are cast via x.float() in forward_orig) — confirm against checkpoint.
        self.patch_embedding_global = operations.Conv3d(in_dim, dim, kernel_size=patch_size, stride=patch_size, device=operation_settings.get("device"), dtype=torch.float32)
        self.img_emb_refimage = MLPProj(1280, dim, operation_settings=operation_settings)
        self.head_global = Head(dim, out_dim, patch_size, eps, operation_settings=operation_settings)

        # Cross-attention injector that mixes encoded music features into the
        # listed DiT block indices; AdaIN path disabled for WanDancer.
        self.music_injector = AudioInjector_WAN(
            dim=self.dim,
            num_heads=self.num_heads,
            inject_layer=list(audio_inject_layers),
            root_net=self,
            enable_adain=False,
            dtype=dtype, device=device, operations=operations
        )
        # Raw per-frame music features (music_feature_dim, e.g. 35) -> latent tokens.
        self.music_projection = operations.Linear(music_feature_dim, music_latent_dim, device=device, dtype=dtype)
        self.music_encoder = nn.ModuleList([MusicEncoderLayer(dim=music_dim, num_heads=music_heads, ffn_dim=1024, device=device, dtype=dtype, operations=operations) for _ in range(2)])
        # 1-D RoPE over the music token axis.
        music_head_dim = music_dim // music_heads
        self.music_rope_embedder = EmbedND(dim=music_head_dim, theta=10000.0, axes_dim=[music_head_dim])

    def forward_orig(self, x, t, context, clip_fea=None, clip_fea_ref=None, freqs=None, audio_embed=None, fps=30, audio_inject_scale=1.0, transformer_options=None, **kwargs):
        """Run the DiT on patchified latents.

        Args:
            x: latent video (b, c, t, h, w), already padded to the patch size.
            t: diffusion timestep(s); may be per-frame (reshaped below).
            context: text-encoder tokens.
            clip_fea / clip_fea_ref: optional CLIP-vision features (input image /
                reference image) prepended to the text context.
            audio_embed: optional per-frame music features [b, frames, music_feature_dim].
            fps: selects the global embedding/head pair when != 30.
            audio_inject_scale: residual scale for the music injection.
        """
        if transformer_options is None:
            transformer_options = {}  # never mutate a shared default dict
        # embeddings — pick the fps-specific patch embedding
        if int(fps + 0.5) != 30:
            x = self.patch_embedding_global(x.float()).to(x.dtype)
        else:
            x = self.patch_embedding(x.float()).to(x.dtype)
        grid_sizes = x.shape[2:]
        latent_frames = grid_sizes[0]
        transformer_options["grid_sizes"] = grid_sizes
        x = x.flatten(2).transpose(1, 2)
        seq_len = x.size(1)

        # time embeddings (supports per-frame timesteps via the reshape)
        e = self.time_embedding(sinusoidal_embedding_1d(self.freq_dim, t.flatten()).to(dtype=x[0].dtype))
        e = e.reshape(t.shape[0], -1, e.shape[-1])
        e0 = self.time_projection(e).unflatten(2, (6, self.dim))

        full_ref = None
        if self.ref_conv is not None:  # model has the weight, but this wasn't used in the original pipeline
            full_ref = kwargs.get("reference_latent", None)
            if full_ref is not None:
                # Prepend the reference latent tokens; they are stripped again after the head.
                full_ref = self.ref_conv(full_ref).flatten(2).transpose(1, 2)
                x = torch.concat((full_ref, x), dim=1)

        # context
        context = self.text_embedding(context)

        audio_emb = None
        if audio_embed is not None:  # encode music feature [1, frame_num, 35] -> [1, F*8, dim]
            music_feature = self.music_projection(audio_embed)
            music_seq_len = music_feature.shape[1]
            music_ids = torch.arange(music_seq_len, device=music_feature.device, dtype=music_feature.dtype).reshape(1, -1, 1)  # create 1D position IDs
            music_freqs = self.music_rope_embedder(music_ids).movedim(1, 2)
            # apply encoder layers
            for layer in self.music_encoder:
                music_feature = layer(music_feature, music_freqs)
            # Resample to 8 music tokens per latent frame and widen to model dim.
            audio_emb = torch.nn.functional.interpolate(music_feature.unsqueeze(1), size=(latent_frames * 8, self.dim), mode='bilinear').squeeze(1)

        # Prepend CLIP-vision tokens (reference image first, then input image).
        context_img_len = 0
        if self.img_emb is not None and clip_fea is not None:
            context_clip = self.img_emb(clip_fea)  # bs x 257 x dim
            context = torch.cat([context_clip, context], dim=1)
            context_img_len += clip_fea.shape[-2]
        if self.img_emb_refimage is not None and clip_fea_ref is not None:
            context_clip_ref = self.img_emb_refimage(clip_fea_ref)
            context = torch.cat([context_clip_ref, context], dim=1)
            context_img_len += clip_fea_ref.shape[-2]

        patches_replace = transformer_options.get("patches_replace", {})
        blocks_replace = patches_replace.get("dit", {})
        transformer_options["total_blocks"] = len(self.blocks)
        transformer_options["block_type"] = "double"
        for i, block in enumerate(self.blocks):
            transformer_options["block_index"] = i
            if ("double_block", i) in blocks_replace:
                def block_wrap(args):
                    out = {}
                    out["img"] = block(args["img"], context=args["txt"], e=args["vec"], freqs=args["pe"], context_img_len=context_img_len, transformer_options=args["transformer_options"])
                    return out
                out = blocks_replace[("double_block", i)]({"img": x, "txt": context, "vec": e0, "pe": freqs, "transformer_options": transformer_options}, {"original_block": block_wrap})
                x = out["img"]
            else:
                x = block(x, e=e0, freqs=freqs, context=context, context_img_len=context_img_len, transformer_options=transformer_options)
            # Inject music features after every block; the injector is a no-op
            # for block indices outside audio_inject_layers.
            if audio_emb is not None:
                x = self.music_injector(x, i, audio_emb, audio_emb_global=None, seq_len=seq_len, scale=audio_inject_scale)

        # head — must mirror the patch-embedding choice above
        if int(fps + 0.5) != 30:
            x = self.head_global(x, e)
        else:
            x = self.head(x, e)

        if full_ref is not None:
            x = x[:, full_ref.shape[1]:]

        # unpatchify
        x = self.unpatchify(x, grid_sizes)
        return x

    def _forward(self, x, timestep, context, clip_fea=None, time_dim_concat=None, transformer_options=None, clip_fea_ref=None, fps=30, audio_inject_scale=1.0, **kwargs):
        """Pad to patch size, compute RoPE frequencies, and crop back afterwards."""
        if transformer_options is None:
            transformer_options = {}  # avoid a shared mutable default
        bs, c, t, h, w = x.shape
        x = comfy.ldm.common_dit.pad_to_patch_size(x, self.patch_size)

        t_len = t
        if time_dim_concat is not None:
            time_dim_concat = comfy.ldm.common_dit.pad_to_patch_size(time_dim_concat, self.patch_size)
            x = torch.cat([x, time_dim_concat], dim=2)
            t_len = x.shape[2]

        freqs = self.rope_encode(t_len, h, w, device=x.device, dtype=x.dtype, fps=fps, transformer_options=transformer_options)
        return self.forward_orig(x, timestep, context, clip_fea=clip_fea, clip_fea_ref=clip_fea_ref, freqs=freqs, fps=fps, audio_inject_scale=audio_inject_scale, transformer_options=transformer_options, **kwargs)[:, :, :t, :h, :w]

    def rope_encode(self, t, h, w, t_start=0, steps_t=None, steps_h=None, steps_w=None, fps=30, device=None, dtype=None, transformer_options=None):
        """Build 3D (time, height, width) RoPE frequencies for the DiT.

        For fps != 30 the temporal positions are rescaled so that frame spacing
        matches a 30 fps reference clock; the last position is snapped to the
        equivalent total duration at 30 fps.
        """
        if transformer_options is None:
            transformer_options = {}
        patch_size = self.patch_size
        t_len = ((t + (patch_size[0] // 2)) // patch_size[0])
        h_len = ((h + (patch_size[1] // 2)) // patch_size[1])
        w_len = ((w + (patch_size[2] // 2)) // patch_size[2])

        if steps_t is None:
            steps_t = t_len
        if steps_h is None:
            steps_h = h_len
        if steps_w is None:
            steps_w = w_len

        h_start = 0
        w_start = 0
        # Optional RoPE scaling/shifting (e.g. for resolution extrapolation).
        rope_options = transformer_options.get("rope_options", None)
        if rope_options is not None:
            t_len = (t_len - 1.0) * rope_options.get("scale_t", 1.0) + 1.0
            h_len = (h_len - 1.0) * rope_options.get("scale_y", 1.0) + 1.0
            w_len = (w_len - 1.0) * rope_options.get("scale_x", 1.0) + 1.0
            t_start += rope_options.get("shift_t", 0.0)
            h_start += rope_options.get("shift_y", 0.0)
            w_start += rope_options.get("shift_x", 0.0)

        img_ids = torch.zeros((steps_t, steps_h, steps_w, 3), device=device, dtype=dtype)
        if int(fps + 0.5) != 30:
            time_scale = 30.0 / fps  # how many time units each frame represents relative to 30fps
            positions_new = torch.arange(steps_t, device=device, dtype=dtype) * time_scale + t_start
            total_frames_at_30fps = int(time_scale * steps_t + 0.5)
            positions_new[-1] = t_start + (total_frames_at_30fps - 1)
            img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + positions_new.reshape(-1, 1, 1)
        else:
            img_ids[:, :, :, 0] = img_ids[:, :, :, 0] + torch.linspace(t_start, t_start + (t_len - 1), steps=steps_t, device=device, dtype=dtype).reshape(-1, 1, 1)
        img_ids[:, :, :, 1] = img_ids[:, :, :, 1] + torch.linspace(h_start, h_start + (h_len - 1), steps=steps_h, device=device, dtype=dtype).reshape(1, -1, 1)
        img_ids[:, :, :, 2] = img_ids[:, :, :, 2] + torch.linspace(w_start, w_start + (w_len - 1), steps=steps_w, device=device, dtype=dtype).reshape(1, 1, -1)
        img_ids = img_ids.reshape(1, -1, img_ids.shape[-1])

        freqs = self.rope_embedder(img_ids).movedim(1, 2)
        return freqs

View File

@ -43,6 +43,7 @@ import comfy.ldm.lumina.model
import comfy.ldm.wan.model
import comfy.ldm.wan.model_animate
import comfy.ldm.wan.ar_model
import comfy.ldm.wan.model_wandancer
import comfy.ldm.hunyuan3d.model
import comfy.ldm.hidream.model
import comfy.ldm.chroma.model
@ -1599,6 +1600,30 @@ class WAN21_SCAIL(WAN21):
return out
class WAN22_WanDancer(WAN21):
    """BaseModel wrapper for the WanDancer music-driven i2v model."""

    def __init__(self, model_config, model_type=ModelType.FLOW, image_to_video=True, device=None):
        # Bypass WAN21.__init__ so we can select the WanDancer unet class directly.
        super(WAN21, self).__init__(model_config, model_type, device=device, unet_model=comfy.ldm.wan.model_wandancer.WanDancerModel)
        self.image_to_video = image_to_video

    def extra_conds(self, **kwargs):
        """Forward WanDancer-specific conditioning (music features, reference
        CLIP-vision output, fps, injection scale) to the sampler."""
        out = super().extra_conds(**kwargs)
        if (audio := kwargs.get("audio_embed", None)) is not None:
            out['audio_embed'] = comfy.conds.CONDRegular(audio)
        if (ref_vision := kwargs.get("clip_vision_output_ref", None)) is not None:
            out['clip_fea_ref'] = comfy.conds.CONDRegular(ref_vision.penultimate_hidden_states)
        if (fps := kwargs.get("fps", None)) is not None:
            out['fps'] = comfy.conds.CONDRegular(torch.FloatTensor([fps]))
        if (scale := kwargs.get("audio_inject_scale", None)) is not None:
            out['audio_inject_scale'] = comfy.conds.CONDRegular(torch.FloatTensor([scale]))
        return out
class Hunyuan3Dv2(BaseModel):
def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
super().__init__(model_config, model_type, device=device, unet_model=comfy.ldm.hunyuan3d.model.Hunyuan3Dv2)

View File

@ -572,6 +572,8 @@ def detect_unet_config(state_dict, key_prefix, metadata=None):
dit_config["model_type"] = "animate"
elif '{}patch_embedding_pose.weight'.format(key_prefix) in state_dict_keys:
dit_config["model_type"] = "scail"
elif '{}patch_embedding_global.weight'.format(key_prefix) in state_dict_keys:
dit_config["model_type"] = "wandancer"
else:
if '{}img_emb.proj.0.bias'.format(key_prefix) in state_dict_keys:
dit_config["model_type"] = "i2v"

View File

@ -1313,6 +1313,37 @@ class WAN21_SCAIL(WAN21_T2V):
out = model_base.WAN21_SCAIL(self, image_to_video=False, device=device)
return out
class WAN22_WanDancer(WAN21_T2V):
    """Model-detection entry for the WanDancer checkpoint family."""

    unet_config = {
        "image_model": "wan2.1",
        "model_type": "wandancer",
        "in_dim": 36,
    }

    def __init__(self, unet_config):
        super().__init__(unet_config)
        self.memory_usage_factor = 1.8

    def get_model(self, state_dict, prefix="", device=None):
        return model_base.WAN22_WanDancer(self, image_to_video=True, device=device)

    def process_unet_state_dict(self, state_dict):
        """Split fused music_encoder attention in_proj tensors into q/k/v projections."""
        converted = {}
        for key, tensor in state_dict.items():
            if "music_encoder" not in key or "self_attn.in_proj" not in key:
                converted[key] = tensor
                continue
            # Fused in_proj is laid out as [q; k; v] along dim 0.
            suffix = "weight" if key.endswith("weight") else "bias"
            base = key.replace(f"in_proj_{suffix}", "")
            d = tensor.shape[0] // 3
            converted[f"{base}q_proj.{suffix}"] = tensor[:d]
            converted[f"{base}k_proj.{suffix}"] = tensor[d:2 * d]
            converted[f"{base}v_proj.{suffix}"] = tensor[2 * d:]
        return converted
class Hunyuan3Dv2(supported_models_base.BASE):
unet_config = {
"image_model": "hunyuan3d2",
@ -1982,6 +2013,7 @@ models = [
WAN22_Animate,
WAN21_FlowRVS,
WAN21_SCAIL,
WAN22_WanDancer,
Hunyuan3Dv2mini,
Hunyuan3Dv2,
Hunyuan3Dv2_1,

View File

@ -1196,7 +1196,7 @@ def model_trange(*args, **kwargs):
pbar.i1_time = time.time()
pbar.set_postfix_str(" Model Initialization complete! ")
elif pbar._i == 2:
#bring forward the effective start time based the the diff between first and second iteration
#bring forward the effective start time based the diff between first and second iteration
#to attempt to remove load overhead from the final step rate estimate.
pbar.start_t = pbar.i1_time - (time.time() - pbar.i1_time)
pbar.set_postfix_str("")

View File

@ -23,7 +23,7 @@ class BriaEditImageRequest(BaseModel):
None,
description="Mask image (black and white). Black areas will be preserved, white areas will be edited. "
"If omitted, the edit applies to the entire image. "
"The input image and the the input mask must be of the same size.",
"The input image and the input mask must be of the same size.",
)
negative_prompt: str | None = Field(None)
guidance_scale: float = Field(...)

View File

@ -198,6 +198,62 @@ RECOMMENDED_PRESETS_SEEDREAM_4 = [
("Custom", None, None),
]
_PRESETS_SEEDREAM_1K = [
("(1K) 1024x1024 (1:1)", 1024, 1024),
("(1K) 864x1152 (3:4)", 864, 1152),
("(1K) 1152x864 (4:3)", 1152, 864),
("(1K) 1312x736 (16:9)", 1312, 736),
("(1K) 736x1312 (9:16)", 736, 1312),
("(1K) 832x1248 (2:3)", 832, 1248),
("(1K) 1248x832 (3:2)", 1248, 832),
("(1K) 1568x672 (21:9)", 1568, 672),
]
_PRESETS_SEEDREAM_2K = [
("(2K) 2048x2048 (1:1)", 2048, 2048),
("(2K) 1728x2304 (3:4)", 1728, 2304),
("(2K) 2304x1728 (4:3)", 2304, 1728),
("(2K) 2848x1600 (16:9)", 2848, 1600),
("(2K) 1600x2848 (9:16)", 1600, 2848),
("(2K) 1664x2496 (2:3)", 1664, 2496),
("(2K) 2496x1664 (3:2)", 2496, 1664),
("(2K) 3136x1344 (21:9)", 3136, 1344),
]
_PRESETS_SEEDREAM_3K = [
("(3K) 3072x3072 (1:1)", 3072, 3072),
("(3K) 2592x3456 (3:4)", 2592, 3456),
("(3K) 3456x2592 (4:3)", 3456, 2592),
("(3K) 4096x2304 (16:9)", 4096, 2304),
("(3K) 2304x4096 (9:16)", 2304, 4096),
("(3K) 2496x3744 (2:3)", 2496, 3744),
("(3K) 3744x2496 (3:2)", 3744, 2496),
("(3K) 4704x2016 (21:9)", 4704, 2016),
]
_PRESETS_SEEDREAM_4K = [
("(4K) 4096x4096 (1:1)", 4096, 4096),
("(4K) 3520x4704 (3:4)", 3520, 4704),
("(4K) 4704x3520 (4:3)", 4704, 3520),
("(4K) 5504x3040 (16:9)", 5504, 3040),
("(4K) 3040x5504 (9:16)", 3040, 5504),
("(4K) 3328x4992 (2:3)", 3328, 4992),
("(4K) 4992x3328 (3:2)", 4992, 3328),
("(4K) 6240x2656 (21:9)", 6240, 2656),
]
_CUSTOM_PRESET = [("Custom", None, None)]
RECOMMENDED_PRESETS_SEEDREAM_5_LITE = (
_PRESETS_SEEDREAM_2K + _PRESETS_SEEDREAM_3K + _PRESETS_SEEDREAM_4K + _CUSTOM_PRESET
)
RECOMMENDED_PRESETS_SEEDREAM_4_5 = (
_PRESETS_SEEDREAM_2K + _PRESETS_SEEDREAM_4K + _CUSTOM_PRESET
)
RECOMMENDED_PRESETS_SEEDREAM_4_0 = (
_PRESETS_SEEDREAM_1K + _PRESETS_SEEDREAM_2K + _PRESETS_SEEDREAM_4K + _CUSTOM_PRESET
)
# Seedance 2.0 reference video pixel count limits per model and output resolution.
SEEDANCE2_REF_VIDEO_PIXEL_LIMITS = {
"dreamina-seedance-2-0-260128": {

View File

@ -1,10 +1,11 @@
from __future__ import annotations
from enum import Enum
from typing import Optional, List, Dict, Any, Union
from typing import Optional, Any
from pydantic import BaseModel, Field, RootModel
class TripoModelVersion(str, Enum):
v3_1_20260211 = 'v3.1-20260211'
v3_0_20250812 = 'v3.0-20250812'
v2_5_20250123 = 'v2.5-20250123'
v2_0_20240919 = 'v2.0-20240919'
@ -142,7 +143,7 @@ class TripoFileEmptyReference(BaseModel):
pass
class TripoFileReference(RootModel):
root: Union[TripoFileTokenReference, TripoUrlReference, TripoObjectReference, TripoFileEmptyReference]
root: TripoFileTokenReference | TripoUrlReference | TripoObjectReference | TripoFileEmptyReference
class TripoGetStsTokenRequest(BaseModel):
format: str = Field(..., description='The format of the image')
@ -183,7 +184,7 @@ class TripoImageToModelRequest(BaseModel):
class TripoMultiviewToModelRequest(BaseModel):
type: TripoTaskType = TripoTaskType.MULTIVIEW_TO_MODEL
files: List[TripoFileReference] = Field(..., description='The file references to convert to a model')
files: list[TripoFileReference] = Field(..., description='The file references to convert to a model')
model_version: Optional[TripoModelVersion] = Field(None, description='The model version to use for generation')
orthographic_projection: Optional[bool] = Field(False, description='Whether to use orthographic projection')
face_limit: Optional[int] = Field(None, description='The number of faces to limit the generation to')
@ -251,27 +252,13 @@ class TripoConvertModelRequest(BaseModel):
with_animation: Optional[bool] = Field(None, description='Whether to include animations')
pack_uv: Optional[bool] = Field(None, description='Whether to pack the UVs')
bake: Optional[bool] = Field(None, description='Whether to bake the model')
part_names: Optional[List[str]] = Field(None, description='The names of the parts to include')
part_names: Optional[list[str]] = Field(None, description='The names of the parts to include')
fbx_preset: Optional[TripoFbxPreset] = Field(None, description='The preset for the FBX export')
export_vertex_colors: Optional[bool] = Field(None, description='Whether to export the vertex colors')
export_orientation: Optional[TripoOrientation] = Field(None, description='The orientation for the export')
animate_in_place: Optional[bool] = Field(None, description='Whether to animate in place')
class TripoTaskRequest(RootModel):
root: Union[
TripoTextToModelRequest,
TripoImageToModelRequest,
TripoMultiviewToModelRequest,
TripoTextureModelRequest,
TripoRefineModelRequest,
TripoAnimatePrerigcheckRequest,
TripoAnimateRigRequest,
TripoAnimateRetargetRequest,
TripoStylizeModelRequest,
TripoConvertModelRequest
]
class TripoTaskOutput(BaseModel):
model: Optional[str] = Field(None, description='URL to the model')
base_model: Optional[str] = Field(None, description='URL to the base model')
@ -283,12 +270,13 @@ class TripoTask(BaseModel):
task_id: str = Field(..., description='The task ID')
type: Optional[str] = Field(None, description='The type of task')
status: Optional[TripoTaskStatus] = Field(None, description='The status of the task')
input: Optional[Dict[str, Any]] = Field(None, description='The input parameters for the task')
input: Optional[dict[str, Any]] = Field(None, description='The input parameters for the task')
output: Optional[TripoTaskOutput] = Field(None, description='The output of the task')
progress: Optional[int] = Field(None, description='The progress of the task', ge=0, le=100)
create_time: Optional[int] = Field(None, description='The creation time of the task')
running_left_time: Optional[int] = Field(None, description='The estimated time left for the task')
queue_position: Optional[int] = Field(None, description='The position in the queue')
consumed_credit: int | None = Field(None)
class TripoTaskResponse(BaseModel):
code: int = Field(0, description='The response code')
@ -296,7 +284,7 @@ class TripoTaskResponse(BaseModel):
class TripoGeneralResponse(BaseModel):
code: int = Field(0, description='The response code')
data: Dict[str, str] = Field(..., description='The task ID data')
data: dict[str, str] = Field(..., description='The task ID data')
class TripoBalanceData(BaseModel):
balance: float = Field(..., description='The account balance')

View File

@ -596,6 +596,7 @@ class Flux2ProImageNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(widgets=["width", "height"], inputs=["images"]),
expr=cls.PRICE_BADGE_EXPR,
),
is_deprecated=True,
)
@classmethod
@ -674,6 +675,175 @@ class Flux2MaxImageNode(Flux2ProImageNode):
"""
# Proxy endpoint per Flux.2 model choice (keys match the DynamicCombo option labels).
_FLUX2_MODEL_ENDPOINTS = {
    "Flux.2 [pro]": "/proxy/bfl/flux-2-pro/generate",
    "Flux.2 [max]": "/proxy/bfl/flux-2-max/generate",
}


def _flux2_model_inputs():
    """Widget set shared by both Flux.2 model options: output size plus optional reference images."""
    return [
        IO.Int.Input(
            "width",
            default=1024,
            min=256,
            max=2048,
            step=32,
        ),
        IO.Int.Input(
            "height",
            default=768,
            min=256,
            max=2048,
            step=32,
        ),
        # Autogrow group: up to 8 optional reference image slots (image_1..image_8).
        IO.Autogrow.Input(
            "images",
            template=IO.Autogrow.TemplateNames(
                IO.Image.Input("image"),
                names=[f"image_{i}" for i in range(1, 9)],
                min=0,
            ),
            tooltip="Optional reference image(s) for image-to-image generation. Up to 8 images.",
        ),
    ]
class Flux2ImageNode(IO.ComfyNode):
    """API node generating images via Flux.2 [pro] or [max] through the BFL proxy."""

    @classmethod
    def define_schema(cls) -> IO.Schema:
        """Declare inputs, outputs and the client-side price-badge estimate."""
        return IO.Schema(
            node_id="Flux2ImageNode",
            display_name="Flux.2 Image",
            category="api node/image/BFL",
            description="Generate images via Flux.2 [pro] or Flux.2 [max] from a prompt and optional reference images.",
            inputs=[
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Prompt for the image generation or edit",
                ),
                # The model choice also selects its per-model widget set
                # (width/height/images); both options use the same widgets.
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option("Flux.2 [pro]", _flux2_model_inputs()),
                        IO.DynamicCombo.Option("Flux.2 [max]", _flux2_model_inputs()),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=0xFFFFFFFFFFFFFFFF,
                    control_after_generate=True,
                    tooltip="The random seed used for creating the noise.",
                ),
            ],
            outputs=[IO.Image.Output()],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            # JSONata expression: base output cost scales with output megapixels;
            # connected reference images add a per-image surcharge, shown as a range.
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(
                    widgets=["model", "model.width", "model.height"],
                    input_groups=["model.images"],
                ),
                expr="""
                (
                    $isMax := widgets.model = "flux.2 [max]";
                    $MP := 1024 * 1024;
                    $w := $lookup(widgets, "model.width");
                    $h := $lookup(widgets, "model.height");
                    $outMP := $max([1, $floor((($w * $h) + $MP - 1) / $MP)]);
                    $outputCost := $isMax
                        ? (0.07 + 0.03 * ($outMP - 1))
                        : (0.03 + 0.015 * ($outMP - 1));
                    $refMin := $isMax ? 0.03 : 0.015;
                    $refMax := $isMax ? 0.24 : 0.12;
                    $hasRefs := $lookup(inputGroups, "model.images") > 0;
                    $hasRefs
                        ? {
                            "type": "range_usd",
                            "min_usd": $outputCost + $refMin,
                            "max_usd": $outputCost + $refMax,
                            "format": { "approximate": true }
                        }
                        : {"type": "usd", "usd": $outputCost}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        model: dict,
        seed: int,
    ) -> IO.NodeOutput:
        """Submit the generation request, poll until ready, and return the image tensor.

        Raises:
            ValueError: if more than 8 reference image frames are supplied.
        """
        model_choice = model["model"]
        endpoint = _FLUX2_MODEL_ENDPOINTS[model_choice]
        width = model["width"]
        height = model["height"]
        images_dict = model.get("images") or {}
        image_tensors: list[Input.Image] = [t for t in images_dict.values() if t is not None]
        # Count frames, not tensors: a 4-D tensor is a batch of images.
        n_images = sum(get_number_of_images(t) for t in image_tensors)
        if n_images > 8:
            raise ValueError("The current maximum number of supported images is 8.")
        # Flatten batched (4-D) tensors into a list of single images.
        flat_tensors: list[torch.Tensor] = []
        for tensor in image_tensors:
            if len(tensor.shape) == 4:
                flat_tensors.extend(tensor[i] for i in range(tensor.shape[0]))
            else:
                flat_tensors.append(tensor)
        # API field names: "input_image" for the first image, then
        # "input_image_2", "input_image_3", ... (no "input_image_1").
        reference_images: dict[str, str] = {}
        for idx, tensor in enumerate(flat_tensors):
            key_name = f"input_image_{idx + 1}" if idx else "input_image"
            reference_images[key_name] = tensor_to_base64_string(tensor, total_pixels=2048 * 2048)
        initial_response = await sync_op(
            cls,
            ApiEndpoint(path=endpoint, method="POST"),
            response_model=BFLFluxProGenerateResponse,
            data=Flux2ProGenerateRequest(
                prompt=prompt,
                width=width,
                height=height,
                seed=seed,
                **reference_images,
            ),
        )

        def price_extractor(_r: BaseModel) -> float | None:
            # NOTE(review): cost appears to be reported in cents — confirm with API docs.
            return None if initial_response.cost is None else initial_response.cost / 100

        # Poll the returned URL until the job reaches a terminal status.
        response = await poll_op(
            cls,
            ApiEndpoint(initial_response.polling_url),
            response_model=BFLFluxStatusResponse,
            status_extractor=lambda r: r.status,
            progress_extractor=lambda r: r.progress,
            price_extractor=price_extractor,
            completed_statuses=[BFLStatus.ready],
            failed_statuses=[
                BFLStatus.request_moderated,
                BFLStatus.content_moderated,
                BFLStatus.error,
                BFLStatus.task_not_found,
            ],
            queued_statuses=[],
        )
        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
class BFLExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@ -685,6 +855,7 @@ class BFLExtension(ComfyExtension):
FluxProFillNode,
Flux2ProImageNode,
Flux2MaxImageNode,
Flux2ImageNode,
]

View File

@ -10,6 +10,9 @@ from comfy_api.latest import IO, ComfyExtension, Input
from comfy_api_nodes.apis.bytedance import (
RECOMMENDED_PRESETS,
RECOMMENDED_PRESETS_SEEDREAM_4,
RECOMMENDED_PRESETS_SEEDREAM_4_0,
RECOMMENDED_PRESETS_SEEDREAM_4_5,
RECOMMENDED_PRESETS_SEEDREAM_5_LITE,
SEEDANCE2_PRICE_PER_1K_TOKENS,
SEEDANCE2_REF_VIDEO_PIXEL_LIMITS,
VIDEO_TASKS_EXECUTION_TIME,
@ -68,6 +71,12 @@ SEEDREAM_MODELS = {
"seedream-4-0-250828": "seedream-4-0-250828",
}
# Size-preset menu per Seedream model id (values defined in apis.bytedance).
SEEDREAM_PRESETS = {
    "seedream-5-0-260128": RECOMMENDED_PRESETS_SEEDREAM_5_LITE,
    "seedream-4-5-251128": RECOMMENDED_PRESETS_SEEDREAM_4_5,
    "seedream-4-0-250828": RECOMMENDED_PRESETS_SEEDREAM_4_0,
}
# Long-running tasks endpoints (e.g., video)
BYTEPLUS_TASK_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks"
BYTEPLUS_TASK_STATUS_ENDPOINT = "/proxy/byteplus/api/v3/contents/generations/tasks" # + /{task_id}
@ -562,6 +571,7 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
)
""",
),
is_deprecated=True,
)
@classmethod
@ -651,6 +661,226 @@ class ByteDanceSeedreamNode(IO.ComfyNode):
return IO.NodeOutput(torch.cat([await download_url_to_image_tensor(i) for i in urls]))
def _seedream_model_inputs(*, max_ref_images: int, presets: list):
    """Build the per-model widget list for the Seedream DynamicCombo.

    Args:
        max_ref_images: maximum number of reference-image slots to expose.
        presets: (label, width, height) tuples used for the size preset combo.
    """
    return [
        IO.Combo.Input(
            "size_preset",
            options=[label for label, _, _ in presets],
            tooltip="Pick a recommended size. Select Custom to use the width and height below.",
        ),
        IO.Int.Input(
            "width",
            default=2048,
            min=1024,
            max=6240,
            step=2,
            tooltip="Custom width for image. Value is working only if `size_preset` is set to `Custom`",
        ),
        IO.Int.Input(
            "height",
            default=2048,
            min=1024,
            max=4992,
            step=2,
            tooltip="Custom height for image. Value is working only if `size_preset` is set to `Custom`",
        ),
        IO.Int.Input(
            "max_images",
            default=1,
            min=1,
            max=max_ref_images,
            step=1,
            display_mode=IO.NumberDisplay.number,
            tooltip="Maximum number of images to generate. With 1, exactly one image is produced. "
            "With >1, the model generates between 1 and max_images related images "
            "(e.g., story scenes, character variations). "
            "Total images (input + generated) cannot exceed 15.",
        ),
        # Autogrow group of optional reference images (image_1..image_<max_ref_images>).
        IO.Autogrow.Input(
            "images",
            template=IO.Autogrow.TemplateNames(
                IO.Image.Input("image"),
                names=[f"image_{i}" for i in range(1, max_ref_images + 1)],
                min=0,
            ),
            tooltip=f"Optional reference image(s) for image-to-image or multi-reference generation. "
            f"Up to {max_ref_images} images.",
        ),
        IO.Boolean.Input(
            "fail_on_partial",
            default=False,
            tooltip="If enabled, abort execution if any requested images are missing or return an error.",
            advanced=True,
        ),
    ]
class ByteDanceSeedreamNodeV2(IO.ComfyNode):
    """Unified Seedream 4.0/4.5/5.0 text-to-image and image-edit API node."""

    @classmethod
    def define_schema(cls):
        """Declare inputs (model-dependent widget sets), outputs and price badge."""
        return IO.Schema(
            node_id="ByteDanceSeedreamNodeV2",
            display_name="ByteDance Seedream 4.5 & 5.0",
            category="api node/image/ByteDance",
            description="Unified text-to-image generation and precise single-sentence editing at up to 4K resolution.",
            inputs=[
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="Text prompt for creating or editing an image.",
                ),
                # Each model option carries its own widget set; reference-image
                # capacity and preset menu differ per model.
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option(
                            "seedream 5.0 lite",
                            _seedream_model_inputs(max_ref_images=14, presets=RECOMMENDED_PRESETS_SEEDREAM_5_LITE),
                        ),
                        IO.DynamicCombo.Option(
                            "seedream-4-5-251128",
                            _seedream_model_inputs(max_ref_images=10, presets=RECOMMENDED_PRESETS_SEEDREAM_4_5),
                        ),
                        IO.DynamicCombo.Option(
                            "seedream-4-0-250828",
                            _seedream_model_inputs(max_ref_images=10, presets=RECOMMENDED_PRESETS_SEEDREAM_4_0),
                        ),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to use for generation.",
                ),
                IO.Boolean.Input(
                    "watermark",
                    default=False,
                    tooltip='Whether to add an "AI generated" watermark to the image.',
                    advanced=True,
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            # Per-image USD price keyed off the selected model name.
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(widgets=["model"]),
                expr="""
                (
                    $price := $contains(widgets.model, "5.0 lite") ? 0.035 :
                        $contains(widgets.model, "4-5") ? 0.04 : 0.03;
                    {
                        "type":"usd",
                        "usd": $price,
                        "format": { "suffix":" x images/Run", "approximate": true }
                    }
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        model: dict,
        seed: int = 0,
        watermark: bool = False,
    ) -> IO.NodeOutput:
        """Validate inputs, upload any reference images, and run the generation task.

        Raises:
            ValueError: on empty prompt, out-of-range resolution, or too many images.
            RuntimeError: when fail_on_partial is set and some images failed.
        """
        validate_string(prompt, strip_whitespace=True, min_length=1)
        model_id = SEEDREAM_MODELS[model["model"]]
        presets = SEEDREAM_PRESETS[model_id]
        size_preset = model.get("size_preset", presets[0][0])
        width = model.get("width", 2048)
        height = model.get("height", 2048)
        max_images = model.get("max_images", 1)
        # max_images > 1 enables the API's sequential (multi-image) mode.
        sequential_image_generation = "disabled" if max_images == 1 else "auto"
        images_dict = model.get("images") or {}
        fail_on_partial = model.get("fail_on_partial", False)
        # Resolve the preset label to a concrete size; the "Custom" entry has
        # (None, None) dimensions and falls through to the width/height widgets.
        w = h = None
        for label, tw, th in presets:
            if label == size_preset:
                w, h = tw, th
                break
        if w is None or h is None:
            w, h = width, height
        out_num_pixels = w * h
        mp_provided = out_num_pixels / 1_000_000.0
        # Per-model resolution bounds (values mirror the API's documented limits).
        if ("seedream-4-5" in model_id or "seedream-5-0" in model_id) and out_num_pixels < 3686400:
            raise ValueError(
                f"Minimum image resolution for the selected model is 3.68MP, but {mp_provided:.2f}MP provided."
            )
        if "seedream-4-0" in model_id and out_num_pixels < 921600:
            raise ValueError(
                f"Minimum image resolution that the selected model can generate is 0.92MP, "
                f"but {mp_provided:.2f}MP provided."
            )
        if out_num_pixels > 16_777_216:
            raise ValueError(
                f"Maximum image resolution for the selected model is 16.78MP, but {mp_provided:.2f}MP provided."
            )
        image_tensors: list[Input.Image] = [t for t in images_dict.values() if t is not None]
        # Count frames, not tensors: 4-D tensors are batches.
        n_input_images = sum(get_number_of_images(t) for t in image_tensors)
        max_num_of_images = 14 if model_id == "seedream-5-0-260128" else 10
        if n_input_images > max_num_of_images:
            raise ValueError(
                f"Maximum of {max_num_of_images} reference images are supported, but {n_input_images} received."
            )
        if sequential_image_generation == "auto" and n_input_images + max_images > 15:
            raise ValueError(
                "The maximum number of generated images plus the number of reference images cannot exceed 15."
            )
        reference_images_urls: list[str] = []
        if image_tensors:
            for tensor in image_tensors:
                # API accepts aspect ratios between 1:3 and 3:1.
                validate_image_aspect_ratio(tensor, (1, 3), (3, 1))
            reference_images_urls = await upload_images_to_comfyapi(
                cls,
                image_tensors,
                max_images=n_input_images,
                mime_type="image/png",
                wait_label="Uploading reference images",
            )
        response = await sync_op(
            cls,
            ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"),
            response_model=ImageTaskCreationResponse,
            data=Seedream4TaskCreationRequest(
                model=model_id,
                prompt=prompt,
                image=reference_images_urls,
                size=f"{w}x{h}",
                seed=seed,
                sequential_image_generation=sequential_image_generation,
                sequential_image_generation_options=Seedream4Options(max_images=max_images),
                watermark=watermark,
            ),
        )
        if len(response.data) == 1:
            return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))
        # Multi-image result: keep only entries that actually carry a URL.
        urls = [str(d["url"]) for d in response.data if isinstance(d, dict) and "url" in d]
        if fail_on_partial and len(urls) < len(response.data):
            raise RuntimeError(f"Only {len(urls)} of {len(response.data)} images were generated before error.")
        return IO.NodeOutput(torch.cat([await download_url_to_image_tensor(i) for i in urls]))
class ByteDanceTextToVideoNode(IO.ComfyNode):
@classmethod
@ -2105,6 +2335,7 @@ class ByteDanceExtension(ComfyExtension):
return [
ByteDanceImageNode,
ByteDanceSeedreamNode,
ByteDanceSeedreamNodeV2,
ByteDanceTextToVideoNode,
ByteDanceImageToVideoNode,
ByteDanceFirstLastFrameNode,

View File

@ -162,6 +162,61 @@ class GrokImageNode(IO.ComfyNode):
)
# Aspect ratios accepted by the Grok image-edit endpoint; "auto" lets the
# service infer the ratio from the connected image(s).
_GROK_IMAGE_EDIT_ASPECT_RATIO_OPTIONS = [
    "auto",
    "1:1",
    "2:3",
    "3:2",
    "3:4",
    "4:3",
    "9:16",
    "16:9",
    "9:19.5",
    "19.5:9",
    "9:20",
    "20:9",
    "1:2",
    "2:1",
]
def _grok_image_edit_model_inputs(*, max_ref_images: int, with_aspect_ratio: bool):
    """Build the per-model widget list for the Grok image-edit DynamicCombo.

    Args:
        max_ref_images: number of reference-image slots the model accepts.
        with_aspect_ratio: whether to expose the aspect_ratio combo
            (only valid when multiple images are connected).
    """
    inputs = [
        # At least one reference image is mandatory (min=1).
        IO.Autogrow.Input(
            "images",
            template=IO.Autogrow.TemplateNames(
                IO.Image.Input("image"),
                names=[f"image_{i}" for i in range(1, max_ref_images + 1)],
                min=1,
            ),
            tooltip=(
                "Reference image to edit."
                if max_ref_images == 1
                else f"Reference image(s) to edit. Up to {max_ref_images} images."
            ),
        ),
        IO.Combo.Input("resolution", options=["1K", "2K"]),
        IO.Int.Input(
            "number_of_images",
            default=1,
            min=1,
            max=10,
            step=1,
            tooltip="Number of edited images to generate",
            display_mode=IO.NumberDisplay.number,
        ),
    ]
    if with_aspect_ratio:
        inputs.append(
            IO.Combo.Input(
                "aspect_ratio",
                options=_GROK_IMAGE_EDIT_ASPECT_RATIO_OPTIONS,
                tooltip="Only allowed when multiple images are connected.",
            )
        )
    return inputs
class GrokImageEditNode(IO.ComfyNode):
@classmethod
@ -256,6 +311,7 @@ class GrokImageEditNode(IO.ComfyNode):
)
""",
),
is_deprecated=True,
)
@classmethod
@ -303,6 +359,143 @@ class GrokImageEditNode(IO.ComfyNode):
)
class GrokImageEditNodeV2(IO.ComfyNode):
    """API node that edits existing images with Grok's image-edit endpoint."""

    @classmethod
    def define_schema(cls):
        """Declare inputs (per-model widget sets), outputs and price badge."""
        return IO.Schema(
            node_id="GrokImageEditNodeV2",
            display_name="Grok Image Edit",
            category="api node/image/Grok",
            description="Modify an existing image based on a text prompt",
            inputs=[
                IO.String.Input(
                    "prompt",
                    multiline=True,
                    default="",
                    tooltip="The text prompt used to generate the image",
                ),
                # "pro" accepts a single reference image and no aspect-ratio
                # override; the other two take up to 3 images plus aspect_ratio.
                IO.DynamicCombo.Input(
                    "model",
                    options=[
                        IO.DynamicCombo.Option(
                            "grok-imagine-image-quality",
                            _grok_image_edit_model_inputs(max_ref_images=3, with_aspect_ratio=True),
                        ),
                        IO.DynamicCombo.Option(
                            "grok-imagine-image-pro",
                            _grok_image_edit_model_inputs(max_ref_images=1, with_aspect_ratio=False),
                        ),
                        IO.DynamicCombo.Option(
                            "grok-imagine-image",
                            _grok_image_edit_model_inputs(max_ref_images=3, with_aspect_ratio=True),
                        ),
                    ],
                ),
                IO.Int.Input(
                    "seed",
                    default=0,
                    min=0,
                    max=2147483647,
                    step=1,
                    display_mode=IO.NumberDisplay.number,
                    control_after_generate=True,
                    tooltip="Seed to determine if node should re-run; "
                    "actual results are nondeterministic regardless of seed.",
                ),
            ],
            outputs=[
                IO.Image.Output(),
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            # Price = flat base fee + per-output-image rate; for non-pro models the
            # base fee scales with the (unknown here) number of input images, so a
            # range is shown.
            price_badge=IO.PriceBadge(
                depends_on=IO.PriceBadgeDepends(
                    widgets=["model", "model.resolution", "model.number_of_images"],
                ),
                expr="""
                (
                    $isQualityModel := widgets.model = "grok-imagine-image-quality";
                    $isPro := $contains(widgets.model, "pro");
                    $res := $lookup(widgets, "model.resolution");
                    $n := $lookup(widgets, "model.number_of_images");
                    $rate := $isQualityModel
                        ? ($res = "1k" ? 0.05 : 0.07)
                        : ($isPro ? 0.07 : 0.02);
                    $base := $isQualityModel ? 0.01 : 0.002;
                    $output := $rate * $n;
                    $isPro
                        ? {"type":"usd","usd": $base + $output}
                        : {"type":"range_usd","min_usd": $base + $output, "max_usd": 3 * $base + $output}
                )
                """,
            ),
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        model: dict,
        seed: int,
    ) -> IO.NodeOutput:
        """Validate inputs and submit the edit request; returns the edited image(s).

        Raises:
            ValueError: on empty prompt, missing/too many images, or an
                aspect-ratio override with only one input image.
        """
        validate_string(prompt, strip_whitespace=True, min_length=1)
        model_id = model["model"]
        resolution = model["resolution"]
        number_of_images = model["number_of_images"]
        images_dict = model.get("images") or {}
        aspect_ratio = model.get("aspect_ratio", "auto")
        image_tensors: list[Input.Image] = [t for t in images_dict.values() if t is not None]
        # Count frames, not tensors: 4-D tensors are batches.
        n_images = sum(get_number_of_images(t) for t in image_tensors)
        if n_images < 1:
            raise ValueError("At least one image is required for editing.")
        if model_id == "grok-imagine-image-pro" and n_images > 1:
            raise ValueError("The pro model supports only 1 input image.")
        if model_id != "grok-imagine-image-pro" and n_images > 3:
            raise ValueError("A maximum of 3 input images is supported.")
        if aspect_ratio != "auto" and n_images == 1:
            raise ValueError(
                "Custom aspect ratio is only allowed when multiple images are connected to the image input."
            )
        # Flatten batched tensors into single images before encoding.
        flat_tensors: list[torch.Tensor] = []
        for tensor in image_tensors:
            if len(tensor.shape) == 4:
                flat_tensors.extend(tensor[i] for i in range(tensor.shape[0]))
            else:
                flat_tensors.append(tensor)
        response = await sync_op(
            cls,
            ApiEndpoint(path="/proxy/xai/v1/images/edits", method="POST"),
            data=ImageEditRequest(
                model=model_id,
                # Images are sent inline as base64 data URLs.
                images=[
                    InputUrlObject(url=f"data:image/png;base64,{tensor_to_base64_string(i)}") for i in flat_tensors
                ],
                prompt=prompt,
                resolution=resolution.lower(),
                n=number_of_images,
                seed=seed,
                aspect_ratio=None if aspect_ratio == "auto" else aspect_ratio,
            ),
            response_model=ImageGenerationResponse,
            price_extractor=_extract_grok_price,
        )
        if len(response.data) == 1:
            return IO.NodeOutput(await download_url_to_image_tensor(response.data[0].url))
        # Multiple results: download every entry that carries a URL and batch them.
        return IO.NodeOutput(
            torch.cat(
                [await download_url_to_image_tensor(i) for i in [str(d.url) for d in response.data if d.url]],
            )
        )
class GrokVideoNode(IO.ComfyNode):
@classmethod
@ -737,6 +930,7 @@ class GrokExtension(ComfyExtension):
return [
GrokImageNode,
GrokImageEditNode,
GrokImageEditNodeV2,
GrokVideoNode,
GrokVideoReferenceNode,
GrokVideoEditNode,

View File

@ -27,6 +27,7 @@ from comfy_api_nodes.util import (
ApiEndpoint,
download_url_to_bytesio,
downscale_image_tensor,
get_number_of_images,
poll_op,
sync_op,
tensor_to_base64_string,
@ -372,6 +373,7 @@ class OpenAIGPTImage1(IO.ComfyNode):
display_name="OpenAI GPT Image 2",
category="api node/image/OpenAI",
description="Generates images synchronously via OpenAI's GPT Image endpoint.",
is_deprecated=True,
inputs=[
IO.String.Input(
"prompt",
@ -640,6 +642,316 @@ class OpenAIGPTImage1(IO.ComfyNode):
return IO.NodeOutput(await validate_and_cast_response(response))
def _gpt_image_shared_inputs():
    """Inputs shared by all GPT Image models (quality + reference images + mask)."""
    return [
        IO.Combo.Input(
            "quality",
            default="low",
            options=["low", "medium", "high"],
            tooltip="Image quality, affects cost and generation time.",
        ),
        # Autogrow group: up to 16 optional reference image slots (image_1..image_16).
        IO.Autogrow.Input(
            "images",
            template=IO.Autogrow.TemplateNames(
                IO.Image.Input("image"),
                names=[f"image_{i}" for i in range(1, 17)],
                min=0,
            ),
            tooltip="Optional reference image(s) for image editing. Up to 16 images.",
        ),
        # Inpainting mask; only valid together with exactly one reference image.
        IO.Mask.Input(
            "mask",
            optional=True,
            tooltip="Optional mask for inpainting (white areas will be replaced). "
            "Requires exactly one reference image.",
        ),
    ]
def _gpt_image_legacy_model_inputs():
    """Per-model widget set for legacy gpt-image-1 / gpt-image-1.5 (4 base sizes, transparent bg allowed)."""
    return [
        IO.Combo.Input(
            "size",
            default="auto",
            options=["auto", "1024x1024", "1024x1536", "1536x1024"],
            tooltip="Image size.",
        ),
        # Legacy models support transparent backgrounds; gpt-image-2 does not.
        IO.Combo.Input(
            "background",
            default="auto",
            options=["auto", "opaque", "transparent"],
            tooltip="Return image with or without background.",
        ),
        *_gpt_image_shared_inputs(),
    ]
class OpenAIGPTImageNodeV2(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="OpenAIGPTImageNodeV2",
display_name="OpenAI GPT Image 2",
category="api node/image/OpenAI",
description="Generates images via OpenAI's GPT Image endpoint.",
inputs=[
IO.String.Input(
"prompt",
default="",
multiline=True,
tooltip="Text prompt for GPT Image",
),
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"gpt-image-2",
[
IO.Combo.Input(
"size",
default="auto",
options=[
"auto",
"1024x1024",
"1024x1536",
"1536x1024",
"2048x2048",
"2048x1152",
"1152x2048",
"3840x2160",
"2160x3840",
"Custom",
],
tooltip="Image size. Select 'Custom' to use the custom width and height.",
),
IO.Int.Input(
"custom_width",
default=1024,
min=1024,
max=3840,
step=16,
tooltip="Used only when `size` is 'Custom'. Must be a multiple of 16.",
),
IO.Int.Input(
"custom_height",
default=1024,
min=1024,
max=3840,
step=16,
tooltip="Used only when `size` is 'Custom'. Must be a multiple of 16.",
),
IO.Combo.Input(
"background",
default="auto",
options=["auto", "opaque"],
tooltip="Return image with or without background.",
),
*_gpt_image_shared_inputs(),
],
),
IO.DynamicCombo.Option("gpt-image-1.5", _gpt_image_legacy_model_inputs()),
IO.DynamicCombo.Option("gpt-image-1", _gpt_image_legacy_model_inputs()),
],
),
IO.Int.Input(
"n",
default=1,
min=1,
max=8,
step=1,
tooltip="How many images to generate",
display_mode=IO.NumberDisplay.number,
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="not implemented yet in backend",
),
],
outputs=[IO.Image.Output()],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.quality", "n"]),
expr="""
(
$ranges := {
"gpt-image-1": {
"low": [0.011, 0.02],
"medium": [0.042, 0.07],
"high": [0.167, 0.25]
},
"gpt-image-1.5": {
"low": [0.009, 0.02],
"medium": [0.034, 0.062],
"high": [0.133, 0.22]
},
"gpt-image-2": {
"low": [0.0048, 0.019],
"medium": [0.041, 0.168],
"high": [0.165, 0.67]
}
};
$range := $lookup($lookup($ranges, widgets.model), $lookup(widgets, "model.quality"));
$nRaw := widgets.n;
$n := ($nRaw != null and $nRaw != 0) ? $nRaw : 1;
($n = 1)
? {"type":"range_usd","min_usd": $range[0], "max_usd": $range[1], "format": {"approximate": true}}
: {
"type":"range_usd",
"min_usd": $range[0] * $n,
"max_usd": $range[1] * $n,
"format": { "suffix": "/Run", "approximate": true }
}
)
""",
),
)
    @classmethod
    async def execute(
        cls,
        prompt: str,
        model: dict,
        n: int,
        seed: int,
    ) -> IO.NodeOutput:
        """Generate or edit images through the proxied OpenAI Images API.

        Args:
            prompt: Text prompt; validated non-empty (whitespace preserved).
            model: Dynamic-combo payload carrying the selected model id under
                "model" plus that model's widget values ("size", "quality",
                "background", optional "custom_width"/"custom_height",
                optional "images" dict of tensors and "mask").
            n: Number of images to request.
            seed: Accepted but never forwarded to the request -- the widget
                tooltip marks it as not implemented in the backend yet.

        Returns:
            IO.NodeOutput wrapping the images decoded from the API response.

        Raises:
            ValueError: for a mask without an input image, an invalid custom
                resolution, or an unknown model id.
            Exception: for mask/image count or size mismatches.
        """
        validate_string(prompt, strip_whitespace=False)
        # Unpack the dynamic-combo selections; custom dimensions fall back to
        # 1024 when the selected option did not expose those widgets.
        model_id = model["model"]
        size = model["size"]
        background = model["background"]
        quality = model["quality"]
        custom_width = model.get("custom_width", 1024)
        custom_height = model.get("custom_height", 1024)
        images_dict = model.get("images") or {}
        image_tensors: list[Input.Image] = [t for t in images_dict.values() if t is not None]
        n_images = sum(get_number_of_images(t) for t in image_tensors)
        mask = model.get("mask")
        if mask is not None and n_images == 0:
            raise ValueError("Cannot use a mask without an input image")
        # "Custom" size: validate the constraints stated in the widget
        # tooltips, then rewrite `size` into the "WxH" string the API expects.
        if size == "Custom":
            if custom_width % 16 != 0 or custom_height % 16 != 0:
                raise ValueError(
                    f"Custom width and height must be multiples of 16, got {custom_width}x{custom_height}"
                )
            if max(custom_width, custom_height) > 3840:
                raise ValueError(
                    f"Custom resolution max edge must be <= 3840, got {custom_width}x{custom_height}"
                )
            ratio = max(custom_width, custom_height) / min(custom_width, custom_height)
            if ratio > 3:
                raise ValueError(
                    f"Custom resolution aspect ratio must not exceed 3:1, got {custom_width}x{custom_height}"
                )
            total_pixels = custom_width * custom_height
            if not 655_360 <= total_pixels <= 8_294_400:
                raise ValueError(
                    f"Custom resolution total pixels must be between 655,360 and 8,294,400, got {total_pixels}"
                )
            size = f"{custom_width}x{custom_height}"
        # Per-model price extractor used by sync_op to report actual cost.
        if model_id == "gpt-image-1":
            price_extractor = calculate_tokens_price_image_1
        elif model_id == "gpt-image-1.5":
            price_extractor = calculate_tokens_price_image_1_5
        elif model_id == "gpt-image-2":
            price_extractor = calculate_tokens_price_image_2_0
        else:
            raise ValueError(f"Unknown model: {model_id}")
        if image_tensors:
            # Edit path: flatten all input batches into single-image tensors.
            flat: list[torch.Tensor] = []
            for tensor in image_tensors:
                if len(tensor.shape) == 4:
                    flat.extend(tensor[i : i + 1] for i in range(tensor.shape[0]))
                else:
                    flat.append(tensor.unsqueeze(0))
            files = []
            for i, single_image in enumerate(flat):
                # Cap uploads at ~2048x2048 pixels and re-encode as PNG.
                scaled_image = downscale_image_tensor(single_image, total_pixels=2048 * 2048).squeeze()
                image_np = (scaled_image.numpy() * 255).astype(np.uint8)
                img = Image.fromarray(image_np)
                img_byte_arr = BytesIO()
                img.save(img_byte_arr, format="PNG")
                img_byte_arr.seek(0)
                # Single image uses the "image" field; batches use "image[]".
                if len(flat) == 1:
                    files.append(("image", (f"image_{i}.png", img_byte_arr, "image/png")))
                else:
                    files.append(("image[]", (f"image_{i}.png", img_byte_arr, "image/png")))
            if mask is not None:
                if len(flat) != 1:
                    raise Exception("Cannot use a mask with multiple image")
                ref_image = flat[0]
                if mask.shape[1:] != ref_image.shape[1:-1]:
                    raise Exception("Mask and Image must be the same size")
                # Encode the mask into the alpha channel (inverted: masked
                # regions become transparent), as an RGBA PNG.
                _, height, width = mask.shape
                rgba_mask = torch.zeros(height, width, 4, device="cpu")
                rgba_mask[:, :, 3] = 1 - mask.squeeze().cpu()
                scaled_mask = downscale_image_tensor(
                    rgba_mask.unsqueeze(0), total_pixels=2048 * 2048
                ).squeeze()
                mask_np = (scaled_mask.numpy() * 255).astype(np.uint8)
                mask_img = Image.fromarray(mask_np)
                mask_img_byte_arr = BytesIO()
                mask_img.save(mask_img_byte_arr, format="PNG")
                mask_img_byte_arr.seek(0)
                files.append(("mask", ("mask.png", mask_img_byte_arr, "image/png")))
            # Multipart edit request carrying the image(s) and optional mask.
            response = await sync_op(
                cls,
                ApiEndpoint(path="/proxy/openai/images/edits", method="POST"),
                response_model=OpenAIImageGenerationResponse,
                data=OpenAIImageEditRequest(
                    model=model_id,
                    prompt=prompt,
                    quality=quality,
                    background=background,
                    n=n,
                    size=size,
                    moderation="low",
                ),
                content_type="multipart/form-data",
                files=files,
            price_extractor=price_extractor,
            )
        else:
            # Pure text-to-image generation request.
            response = await sync_op(
                cls,
                ApiEndpoint(path="/proxy/openai/images/generations", method="POST"),
                response_model=OpenAIImageGenerationResponse,
                data=OpenAIImageGenerationRequest(
                    model=model_id,
                    prompt=prompt,
                    quality=quality,
                    background=background,
                    n=n,
                    size=size,
                    moderation="low",
                ),
                price_extractor=price_extractor,
            )
        return IO.NodeOutput(await validate_and_cast_response(response))
class OpenAIChatNode(IO.ComfyNode):
"""
Node to generate text responses from an OpenAI model.
@ -999,6 +1311,7 @@ class OpenAIExtension(ComfyExtension):
OpenAIDalle2,
OpenAIDalle3,
OpenAIGPTImage1,
OpenAIGPTImageNodeV2,
OpenAIChatNode,
OpenAIInputFiles,
OpenAIChatConfig,

View File

@ -60,6 +60,7 @@ async def poll_until_finished(
],
status_extractor=lambda x: x.data.status,
progress_extractor=lambda x: x.data.progress,
price_extractor=lambda x: x.data.consumed_credit * 0.01 if x.data.consumed_credit else None,
estimated_duration=average_duration,
)
if response_poll.data.status == TripoTaskStatus.SUCCESS:
@ -113,7 +114,6 @@ class TripoTextToModelNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(
widgets=[
"model_version",
"style",
"texture",
"pbr",
"quad",
@ -124,20 +124,17 @@ class TripoTextToModelNode(IO.ComfyNode):
expr="""
(
$isV14 := $contains(widgets.model_version,"v1.4");
$style := widgets.style;
$hasStyle := ($style != "" and $style != "none");
$isV3OrLater := $contains(widgets.model_version,"v3.");
$withTexture := widgets.texture or widgets.pbr;
$isHdTexture := (widgets.texture_quality = "detailed");
$isDetailedGeometry := (widgets.geometry_quality = "detailed");
$baseCredits :=
$isV14 ? 20 : ($withTexture ? 20 : 10);
$credits :=
$baseCredits
+ ($hasStyle ? 5 : 0)
$credits := $isV14 ? 20 : (
($withTexture ? 20 : 10)
+ (widgets.quad ? 5 : 0)
+ ($isHdTexture ? 10 : 0)
+ ($isDetailedGeometry ? 20 : 0);
{"type":"usd","usd": $round($credits * 0.01, 2)}
+ (($isDetailedGeometry and $isV3OrLater) ? 20 : 0)
);
{"type":"usd","usd": $round($credits * 0.01, 2), "format": {"approximate": true}}
)
""",
),
@ -239,7 +236,6 @@ class TripoImageToModelNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(
widgets=[
"model_version",
"style",
"texture",
"pbr",
"quad",
@ -250,20 +246,17 @@ class TripoImageToModelNode(IO.ComfyNode):
expr="""
(
$isV14 := $contains(widgets.model_version,"v1.4");
$style := widgets.style;
$hasStyle := ($style != "" and $style != "none");
$isV3OrLater := $contains(widgets.model_version,"v3.");
$withTexture := widgets.texture or widgets.pbr;
$isHdTexture := (widgets.texture_quality = "detailed");
$isDetailedGeometry := (widgets.geometry_quality = "detailed");
$baseCredits :=
$isV14 ? 30 : ($withTexture ? 30 : 20);
$credits :=
$baseCredits
+ ($hasStyle ? 5 : 0)
$credits := $isV14 ? 30 : (
($withTexture ? 30 : 20)
+ (widgets.quad ? 5 : 0)
+ ($isHdTexture ? 10 : 0)
+ ($isDetailedGeometry ? 20 : 0);
{"type":"usd","usd": $round($credits * 0.01, 2)}
+ (($isDetailedGeometry and $isV3OrLater) ? 20 : 0)
);
{"type":"usd","usd": $round($credits * 0.01, 2), "format": {"approximate": true}}
)
""",
),
@ -358,7 +351,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
"texture_alignment", default="original_image", options=["original_image", "geometry"], optional=True, advanced=True
),
IO.Int.Input("face_limit", default=-1, min=-1, max=500000, optional=True, advanced=True),
IO.Boolean.Input("quad", default=False, optional=True, advanced=True),
IO.Boolean.Input("quad", default=False, optional=True, advanced=True, tooltip="This parameter is deprecated and does nothing."),
IO.Combo.Input("geometry_quality", default="standard", options=["standard", "detailed"], optional=True, advanced=True),
],
outputs=[
@ -379,7 +372,6 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
"model_version",
"texture",
"pbr",
"quad",
"texture_quality",
"geometry_quality",
],
@ -387,17 +379,16 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
expr="""
(
$isV14 := $contains(widgets.model_version,"v1.4");
$isV3OrLater := $contains(widgets.model_version,"v3.");
$withTexture := widgets.texture or widgets.pbr;
$isHdTexture := (widgets.texture_quality = "detailed");
$isDetailedGeometry := (widgets.geometry_quality = "detailed");
$baseCredits :=
$isV14 ? 30 : ($withTexture ? 30 : 20);
$credits :=
$baseCredits
+ (widgets.quad ? 5 : 0)
$credits := $isV14 ? 30 : (
($withTexture ? 30 : 20)
+ ($isHdTexture ? 10 : 0)
+ ($isDetailedGeometry ? 20 : 0);
{"type":"usd","usd": $round($credits * 0.01, 2)}
+ (($isDetailedGeometry and $isV3OrLater) ? 20 : 0)
);
{"type":"usd","usd": $round($credits * 0.01, 2), "format": {"approximate": true}}
)
""",
),
@ -457,7 +448,7 @@ class TripoMultiviewToModelNode(IO.ComfyNode):
geometry_quality=geometry_quality,
texture_alignment=texture_alignment,
face_limit=face_limit if face_limit != -1 else None,
quad=quad,
quad=None,
),
)
return await poll_until_finished(cls, response, average_duration=80)
@ -498,7 +489,7 @@ class TripoTextureNode(IO.ComfyNode):
expr="""
(
$tq := widgets.texture_quality;
{"type":"usd","usd": ($contains($tq,"detailed") ? 0.2 : 0.1)}
{"type":"usd","usd": ($contains($tq,"detailed") ? 0.2 : 0.1), "format": {"approximate": true}}
)
""",
),
@ -555,7 +546,7 @@ class TripoRefineNode(IO.ComfyNode):
is_api_node=True,
is_output_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.3}""",
expr="""{"type":"usd","usd":0.3, "format": {"approximate": true}}""",
),
)
@ -592,7 +583,7 @@ class TripoRigNode(IO.ComfyNode):
is_api_node=True,
is_output_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.25}""",
expr="""{"type":"usd","usd":0.25, "format": {"approximate": true}}""",
),
)
@ -652,7 +643,7 @@ class TripoRetargetNode(IO.ComfyNode):
is_api_node=True,
is_output_node=True,
price_badge=IO.PriceBadge(
expr="""{"type":"usd","usd":0.1}""",
expr="""{"type":"usd","usd":0.1, "format": {"approximate": true}}""",
),
)
@ -761,19 +752,10 @@ class TripoConversionNode(IO.ComfyNode):
"face_limit",
"texture_size",
"texture_format",
"force_symmetry",
"flatten_bottom",
"flatten_bottom_threshold",
"pivot_to_center_bottom",
"scale_factor",
"with_animation",
"pack_uv",
"bake",
"part_names",
"fbx_preset",
"export_vertex_colors",
"export_orientation",
"animate_in_place",
],
),
expr="""
@ -783,28 +765,16 @@ class TripoConversionNode(IO.ComfyNode):
$flatThresh := (widgets.flatten_bottom_threshold != null) ? widgets.flatten_bottom_threshold : 0;
$scale := (widgets.scale_factor != null) ? widgets.scale_factor : 1;
$texFmt := (widgets.texture_format != "" ? widgets.texture_format : "jpeg");
$part := widgets.part_names;
$fbx := (widgets.fbx_preset != "" ? widgets.fbx_preset : "blender");
$orient := (widgets.export_orientation != "" ? widgets.export_orientation : "default");
$advanced :=
widgets.quad or
widgets.force_symmetry or
widgets.flatten_bottom or
widgets.pivot_to_center_bottom or
widgets.with_animation or
widgets.pack_uv or
widgets.bake or
widgets.export_vertex_colors or
widgets.animate_in_place or
($face != -1) or
($texSize != 4096) or
($flatThresh != 0) or
($scale != 1) or
($texFmt != "jpeg") or
($part != "") or
($fbx != "blender") or
($orient != "default");
{"type":"usd","usd": ($advanced ? 0.1 : 0.05)}
($texFmt != "jpeg");
{"type":"usd","usd": ($advanced ? 0.1 : 0.05), "format": {"approximate": true}}
)
""",
),

View File

@ -63,7 +63,7 @@ class MathExpressionNode(io.ComfyNode):
@classmethod
def define_schema(cls) -> io.Schema:
autogrow = io.Autogrow.TemplateNames(
input=io.MultiType.Input("value", [io.Float, io.Int]),
input=io.MultiType.Input("value", [io.Float, io.Int, io.Boolean]),
names=list(string.ascii_lowercase),
min=1,
)
@ -82,6 +82,7 @@ class MathExpressionNode(io.ComfyNode):
outputs=[
io.Float.Output(display_name="FLOAT"),
io.Int.Output(display_name="INT"),
io.Boolean.Output(display_name="BOOL"),
],
)
@ -97,7 +98,7 @@ class MathExpressionNode(io.ComfyNode):
result = simple_eval(expression, names=context, functions=MATH_FUNCTIONS)
# bool check must come first because bool is a subclass of int in Python
if isinstance(result, bool) or not isinstance(result, (int, float)):
if not isinstance(result, (int, float)):
raise ValueError(
f"Math Expression '{expression}' must evaluate to a numeric result, "
f"got {type(result).__name__}: {result!r}"
@ -106,7 +107,7 @@ class MathExpressionNode(io.ComfyNode):
raise ValueError(
f"Math Expression '{expression}' produced a non-finite result: {result}"
)
return io.NodeOutput(float(result), int(result))
return io.NodeOutput(float(result), int(result), bool(result))
class MathExtension(ComfyExtension):

View File

@ -116,7 +116,7 @@ class EmptyQwenImageLayeredLatentImage(io.ComfyNode):
inputs=[
io.Int.Input("width", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("height", default=640, min=16, max=nodes.MAX_RESOLUTION, step=16),
io.Int.Input("layers", default=3, min=0, max=nodes.MAX_RESOLUTION, step=1, advanced=True),
io.Int.Input("layers", default=3, min=0, max=nodes.MAX_RESOLUTION, step=1),
io.Int.Input("batch_size", default=1, min=1, max=4096),
],
outputs=[

View File

@ -0,0 +1,971 @@
import math
import nodes
import node_helpers
import torch
import torchaudio
import comfy.model_management
import comfy.utils
import numpy as np
import logging
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
import scipy.signal
import scipy.ndimage
import scipy.fft
import scipy.sparse
# Audio Processing Functions - Derived from librosa (https://github.com/librosa/librosa)
# Copyright (c) 2013--2023, librosa development team.
def mel_to_hz(mels, htk=False):
    """Convert mel-scale values to frequency in Hz (Slaney by default, HTK optional)."""
    mel_arr = np.asanyarray(mels)
    if htk:
        # HTK formula: inverse of 2595 * log10(1 + f / 700).
        return 700.0 * (10.0 ** (mel_arr / 2595.0) - 1.0)
    # Slaney scale: linear below 1 kHz, logarithmic above.
    lin_origin = 0.0
    lin_slope = 200.0 / 3
    hz = lin_origin + lin_slope * mel_arr
    break_hz = 1000.0
    break_mel = (break_hz - lin_origin) / lin_slope
    log_step = np.log(6.4) / 27.0
    if mel_arr.ndim:
        above = mel_arr >= break_mel
        hz[above] = break_hz * np.exp(log_step * (mel_arr[above] - break_mel))
    elif mel_arr >= break_mel:
        hz = break_hz * np.exp(log_step * (mel_arr - break_mel))
    return hz
def hz_to_mel(frequencies, htk=False):
    """Convert frequency in Hz to the mel scale (Slaney by default, HTK optional)."""
    freq_arr = np.asanyarray(frequencies)
    if htk:
        return 2595.0 * np.log10(1.0 + freq_arr / 700.0)
    # Slaney scale: linear below 1 kHz, logarithmic above.
    lin_origin = 0.0
    lin_slope = 200.0 / 3
    mel_vals = (freq_arr - lin_origin) / lin_slope
    break_hz = 1000.0
    break_mel = (break_hz - lin_origin) / lin_slope
    log_step = np.log(6.4) / 27.0
    if freq_arr.ndim:
        above = freq_arr >= break_hz
        mel_vals[above] = break_mel + np.log(freq_arr[above] / break_hz) / log_step
    elif freq_arr >= break_hz:
        mel_vals = break_mel + np.log(freq_arr / break_hz) / log_step
    return mel_vals
def compute_cqt(y, sr=22050, hop_length=512, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0):
    """Compute Constant-Q Transform (CQT) spectrogram.

    Recursive multi-rate implementation (librosa-style): the top octave is
    analyzed with a complex wavelet filterbank applied in the frequency
    domain, then the signal and hop are halved before processing each lower
    octave.

    Args:
        y: 1-D audio signal (converted to float32 internally).
        sr: Sample rate in Hz.
        hop_length: Hop size in samples at the original rate.
        fmin: Lowest analyzed frequency in Hz; defaults to C1 (~32.703 Hz).
        n_bins: Total number of CQT bins.
        bins_per_octave: Frequency resolution per octave.
        tuning: Tuning offset in fractional bins applied to fmin.

    Returns:
        float32 magnitude array of shape (n_bins, n_frames).
    """
    def _relative_bandwidth(freqs):
        # Per-bin relative bandwidth (alpha) from the spacing of neighboring
        # center frequencies on a log2 axis; edges use one-sided differences.
        bpo = np.empty_like(freqs)
        logf = np.log2(freqs)
        bpo[0] = 1.0 / (logf[1] - logf[0])
        bpo[-1] = 1.0 / (logf[-1] - logf[-2])
        bpo[1:-1] = 2.0 / (logf[2:] - logf[:-2])
        return (2.0 ** (2.0 / bpo) - 1.0) / (2.0 ** (2.0 / bpo) + 1.0)
    def _wavelet_lengths(freqs, sr, filter_scale, alpha):
        # Filter length in samples for each center frequency (Q / f * sr).
        Q = float(filter_scale) / alpha
        return Q * sr / freqs  # shape (n_bins,) floats
    def _build_wavelet(freqs_oct, sr, filter_scale, alpha_oct):
        # Build hann-windowed, L1-normalized complex exponentials for one
        # octave, centered in a power-of-two FFT buffer.
        lengths = _wavelet_lengths(freqs_oct, sr, filter_scale, alpha_oct)
        filters = []
        for ilen, freq in zip(lengths, freqs_oct):
            t = np.arange(int(-ilen // 2), int(ilen // 2), dtype=float)
            sig = (np.cos(t * 2 * np.pi * freq / sr)
                   + 1j * np.sin(t * 2 * np.pi * freq / sr)).astype(np.complex64)
            sig *= scipy.signal.get_window('hann', len(sig), fftbins=True)
            l1 = np.sum(np.abs(sig))
            tiny = np.finfo(np.float32).tiny
            # Guard against division by zero for degenerate (empty) filters.
            sig /= max(l1, tiny)
            filters.append(sig)
        max_len = max(lengths)
        n_fft = int(2.0 ** np.ceil(np.log2(max_len)))
        out = np.zeros((len(filters), n_fft), dtype=np.complex64)
        for k, f in enumerate(filters):
            lpad = int((n_fft - len(f)) // 2)
            out[k, lpad: lpad + len(f)] = f
        return out, lengths
    def _resample_half(y):
        # Downsample by exactly 2x with an anti-aliasing FIR, rescaling by
        # 1/sqrt(ratio) to preserve energy across octaves.
        ratio = 0.5
        n_samples = int(np.ceil(len(y) * ratio))
        # Kaiser-windowed FIR matches librosa/soxr more closely than scipy's default Hamming filter
        L = 2
        h = scipy.signal.firwin(160 * L + 1, 0.96 / L, window=('kaiser', 6.5))
        y_hat = scipy.signal.resample_poly(y.astype(np.float32), 1, 2, window=h)
        if len(y_hat) > n_samples:
            y_hat = y_hat[:n_samples]
        elif len(y_hat) < n_samples:
            y_hat = np.pad(y_hat, (0, n_samples - len(y_hat)))
        y_hat /= np.sqrt(ratio)
        return y_hat.astype(np.float32)
    def _sparsify_rows(x, quantile=0.01):
        # Zero out the smallest coefficients of each row (up to `quantile` of
        # its L1 mass) and store the result as a CSR sparse matrix.
        mags = np.abs(x)
        norms = np.sum(mags, axis=1, keepdims=True)
        norms = np.where(norms == 0, 1.0, norms)
        mag_sort = np.sort(mags, axis=1)
        cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
        threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
        x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)
        for i, j in enumerate(threshold_idx):
            idx = np.where(mags[i] >= mag_sort[i, j])
            x_sparse[i, idx] = x[i, idx]
        return x_sparse.tocsr()
    if fmin is None:
        fmin = 32.70319566257483  # C1 note frequency
    # Apply the tuning correction, then lay out all bin center frequencies.
    fmin = fmin * (2.0 ** (tuning / bins_per_octave))
    freqs = fmin * (2.0 ** (np.arange(n_bins) / bins_per_octave))
    alpha = _relative_bandwidth(freqs)
    lengths = _wavelet_lengths(freqs, float(sr), 1, alpha)
    n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
    n_filters = min(bins_per_octave, n_bins)
    cqt_resp = []
    my_y = y.astype(np.float32)
    my_sr = float(sr)
    my_hop = int(hop_length)
    # Process octaves from highest to lowest, halving the rate each pass.
    for i in range(n_octaves):
        if i == 0:
            sl = slice(-n_filters, None)
        else:
            sl = slice(-n_filters * (i + 1), -n_filters * i)
        freqs_oct = freqs[sl]
        alpha_oct = alpha[sl]
        basis, basis_lengths = _build_wavelet(freqs_oct, my_sr, 1, alpha_oct)
        n_fft_oct = basis.shape[1]
        # Frequency-domain normalisation
        basis = basis.astype(np.complex64)
        basis *= basis_lengths[:, np.newaxis] / float(n_fft_oct)
        fft_basis = scipy.fft.fft(basis, n=n_fft_oct, axis=1)[:, :(n_fft_oct // 2) + 1]
        fft_basis = _sparsify_rows(fft_basis, quantile=0.01)
        # Compensate for the rate change relative to the original sr.
        fft_basis = fft_basis * np.sqrt(sr / my_sr)
        # Centered STFT via stride tricks, then project onto the filterbank.
        y_pad = np.pad(my_y, int(n_fft_oct // 2), mode='constant')
        n_frames = 1 + (len(y_pad) - n_fft_oct) // my_hop
        frames = np.lib.stride_tricks.as_strided(
            y_pad,
            shape=(n_fft_oct, n_frames),
            strides=(y_pad.strides[0], y_pad.strides[0] * my_hop),
        )
        stft_result = scipy.fft.rfft(frames, axis=0)
        cqt_resp.append(fft_basis.dot(stft_result))
        # Only descend an octave while the hop remains evenly divisible.
        if my_hop % 2 == 0:
            my_hop //= 2
            my_sr /= 2.0
            my_y = _resample_half(my_y)
    # Stack per-octave responses (highest octave first) into one array,
    # trimming all octaves to the shortest frame count.
    max_col = min(c.shape[-1] for c in cqt_resp)
    cqt_out = np.empty((n_bins, max_col), dtype=np.complex64)
    end = n_bins
    for c_i in cqt_resp:
        n_oct = c_i.shape[0]
        if end < n_oct:
            cqt_out[:end, :] = c_i[-end:, :max_col]
        else:
            cqt_out[end - n_oct:end, :] = c_i[:, :max_col]
        end -= n_oct
    # Scale by 1/sqrt(filter length) to equalize energy across bins.
    cqt_out /= np.sqrt(lengths)[:, np.newaxis]
    return np.abs(cqt_out).astype(np.float32)
def cq_to_chroma_mapping(n_input, bins_per_octave=12, n_chroma=12, fmin=None):
    """Build a matrix that folds CQT bins onto chroma (pitch-class) bins."""
    if fmin is None:
        fmin = 32.70319566257483  # C1 note frequency
    bins_per_chroma = bins_per_octave / n_chroma
    # One-octave aggregation: each chroma row sums its CQT sub-bins, with a
    # half-window left roll so the sub-bins are centered on the pitch class.
    octave_map = np.repeat(np.eye(n_chroma), int(bins_per_chroma), axis=1)
    octave_map = np.roll(octave_map, -int(bins_per_chroma // 2), axis=1)
    total_octaves = int(np.ceil(n_input / bins_per_octave))
    full_map = np.tile(octave_map, total_octaves)[:, :n_input]
    # Rotate rows so chroma index 0 lines up with the pitch class of fmin.
    fmin_pitch_class = np.mod(12 * np.log2(fmin / 440.0) + 69, 12)
    row_shift = int(np.round(fmin_pitch_class * (n_chroma / 12.0)))
    full_map = np.roll(full_map, row_shift, axis=0)
    return full_map.astype(np.float32)
def _parabolic_interpolation(S, axis=-2):
"""Compute parabolic interpolation shift for peak refinement."""
S_next = np.roll(S, -1, axis=axis)
S_prev = np.roll(S, 1, axis=axis)
a = S_next + S_prev - 2 * S
b = (S_next - S_prev) / 2.0
shifts = np.zeros_like(S)
valid = np.abs(b) < np.abs(a)
shifts[valid] = -b[valid] / a[valid]
if axis == -2 or axis == S.ndim - 2:
shifts[0, :] = 0
shifts[-1, :] = 0
elif axis == 0:
shifts[0, ...] = 0
shifts[-1, ...] = 0
return shifts
def _localmax(S, axis=-2):
"""Find local maxima along an axis."""
S_prev = np.roll(S, 1, axis=axis)
S_next = np.roll(S, -1, axis=axis)
local_max = (S > S_prev) & (S >= S_next)
if axis == -2 or axis == S.ndim - 2:
local_max[-1, :] = S[-1, :] > S[-2, :]
# First element is never a local max (strict inequality with previous)
local_max[0, :] = False
elif axis == 0:
local_max[-1, ...] = S[-1, ...] > S[-2, ...]
local_max[0, ...] = False
return local_max
def piptrack(y=None, sr=22050, S=None, n_fft=2048, hop_length=512,
             fmin=150.0, fmax=4000.0, threshold=0.1):
    """Pitch tracking on thresholded parabolically-interpolated STFT.

    Args:
        y: 1-D audio signal; used only when S is None.
        sr: Sample rate in Hz.
        S: Optional precomputed magnitude spectrogram (freq_bins, frames).
        n_fft: FFT size for the internally computed STFT.
        hop_length: Hop size in samples for the internally computed STFT.
        fmin, fmax: Frequency band (Hz) in which peaks are kept.
        threshold: Per-frame magnitude floor as a fraction of the frame max.

    Returns:
        (pitches, mags): arrays shaped like S; nonzero only at detected peaks,
        holding the interpolated frequency (Hz) and interpolated magnitude.

    Raises:
        ValueError: if neither y nor S is provided.
    """
    # Compute STFT if not provided
    if S is None:
        if y is None:
            raise ValueError("Either y or S must be provided")
        fft_window = scipy.signal.get_window('hann', n_fft, fftbins=True)
        # Defensive centering pad; with the call above the window already has
        # length n_fft, so this branch is normally a no-op.
        if len(fft_window) < n_fft:
            lpad = int((n_fft - len(fft_window)) // 2)
            fft_window = np.pad(fft_window, (lpad, int(n_fft - len(fft_window) - lpad)), mode='constant')
        fft_window = fft_window.reshape((-1, 1))
        # Centered framing via zero-pad + stride tricks (no data copy).
        y_pad = np.pad(y, int(n_fft // 2), mode='constant')
        n_frames = 1 + (len(y_pad) - n_fft) // hop_length
        frames = np.lib.stride_tricks.as_strided(
            y_pad,
            shape=(n_fft, n_frames),
            strides=(y_pad.strides[0], y_pad.strides[0] * hop_length)
        )
        S = scipy.fft.rfft((fft_window * frames).astype(np.float32), axis=0)
        S = np.abs(S)
    fmin = max(fmin, 0)
    fmax = min(fmax, float(sr) / 2)
    # Bin center frequencies recovered from the rfft bin count.
    fft_freqs = np.fft.rfftfreq(S.shape[0] * 2 - 2, 1.0 / sr)
    if len(fft_freqs) > S.shape[0]:
        fft_freqs = fft_freqs[:S.shape[0]]
    # Sub-bin refinement: parabolic shift per bin, plus a first-order
    # magnitude correction along the local gradient.
    shift = _parabolic_interpolation(S, axis=0)
    avg = np.gradient(S, axis=0)
    dskew = 0.5 * avg * shift
    pitches = np.zeros_like(S)
    mags = np.zeros_like(S)
    freq_mask = (fmin <= fft_freqs) & (fft_freqs < fmax)
    freq_mask = freq_mask.reshape(-1, 1)
    # Keep only local maxima above `threshold` times each frame's peak.
    ref_value = threshold * np.max(S, axis=0, keepdims=True)
    local_max = _localmax(S * (S > ref_value), axis=0)
    idx = np.nonzero(freq_mask & local_max)
    pitches[idx] = (idx[0] + shift[idx]) * float(sr) / (S.shape[0] * 2 - 2)
    mags[idx] = S[idx] + dskew[idx]
    return pitches, mags
def hz_to_octs(frequencies, tuning=0.0, bins_per_octave=12):
    """Convert frequencies (Hz) to octave numbers relative to A440/16."""
    # Tuning shifts the A4 reference by fractional bins before dividing.
    ref_a4 = 440.0 * 2.0 ** (tuning / bins_per_octave)
    return np.log2(np.asanyarray(frequencies) / (float(ref_a4) / 16))
def pitch_tuning(frequencies, resolution=0.01, bins_per_octave=12):
    """Estimate a global tuning offset (fractional bins) from detected pitches."""
    freqs = np.atleast_1d(frequencies)
    freqs = freqs[freqs > 0]
    if not np.any(freqs):
        # No voiced estimates: report perfect tuning.
        return 0.0
    # Fractional distance of each pitch from the nearest bin, folded to
    # [-0.5, 0.5) bins.
    deviation = np.mod(bins_per_octave * hz_to_octs(freqs, tuning=0.0,
                                                    bins_per_octave=bins_per_octave), 1.0)
    deviation[deviation >= 0.5] -= 1.0
    # Histogram the deviations and report the most populated bin's left edge.
    edges = np.linspace(-0.5, 0.5, int(np.ceil(1.0 / resolution)) + 1)
    counts, edges = np.histogram(deviation, edges)
    return edges[np.argmax(counts)]
def estimate_tuning(y, sr=22050, bins_per_octave=12):
    """Estimate the signal's global tuning deviation (fractional bins) from 12-TET."""
    n_fft = 2048
    hop_length = 512
    # Too short for even one analysis frame: report perfect tuning.
    if len(y) < n_fft:
        return 0.0
    pitch, mag = piptrack(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length,
                          fmin=150.0, fmax=4000.0, threshold=0.1)
    voiced = pitch > 0
    if not voiced.any():
        return 0.0
    # Keep only pitches at least as strong as the median voiced magnitude.
    mag_floor = np.median(mag[voiced])
    strong_pitches = pitch[(mag >= mag_floor) & voiced]
    if len(strong_pitches) == 0:
        return 0.0
    return float(pitch_tuning(strong_pitches, resolution=0.01, bins_per_octave=bins_per_octave))
def compute_chroma_cens(y, sr=22050, hop_length=512, n_chroma=12,
                        n_octaves=7, bins_per_octave=36,
                        win_len_smooth=41, norm=2):
    """Compute Chroma Energy Normalized Statistics (CENS) features.

    Pipeline: tuning-corrected CQT -> fold to chroma -> L1-normalize each
    frame -> quantize energies into coarse steps -> temporal smoothing ->
    per-frame normalization.

    Args:
        y: 1-D audio signal.
        sr: Sample rate in Hz.
        hop_length: CQT hop size in samples.
        n_chroma: Number of chroma (pitch-class) bins.
        n_octaves: Octave span of the underlying CQT.
        bins_per_octave: CQT resolution per octave.
        win_len_smooth: Smoothing window length in frames; None/0 disables it.
        norm: 2 for L2 frame normalization, np.inf for max-norm; other values
            skip the final normalization.

    Returns:
        Array of shape (n_chroma, n_frames).
    """
    tuning = estimate_tuning(y, sr, bins_per_octave=bins_per_octave)
    fmin = 32.70319566257483  # C1 note frequency
    n_bins = n_octaves * bins_per_octave
    cqt_mag = compute_cqt(y, sr=sr, hop_length=hop_length,
                          fmin=fmin, n_bins=n_bins,
                          bins_per_octave=bins_per_octave,
                          tuning=tuning)
    chroma_map = cq_to_chroma_mapping(n_bins, bins_per_octave=bins_per_octave,
                                      n_chroma=n_chroma, fmin=fmin)
    chroma = np.dot(chroma_map, cqt_mag)
    # L1-normalize each frame (tiny floor avoids division by zero on silence).
    threshold = np.finfo(chroma.dtype).tiny
    chroma_sum = np.sum(np.abs(chroma), axis=0, keepdims=True)
    chroma_sum = np.maximum(chroma_sum, threshold)
    chroma = chroma / chroma_sum
    # CENS quantization: each threshold crossed adds a 0.25 step.
    quant_steps = [0.4, 0.2, 0.1, 0.05]
    quant_weights = [0.25, 0.25, 0.25, 0.25]
    chroma_quant = np.zeros_like(chroma)
    for step, weight in zip(quant_steps, quant_weights):
        chroma_quant += (chroma > step) * weight
    # Temporal smoothing with a normalized hann window along the time axis.
    if win_len_smooth is not None and win_len_smooth > 0:
        win = scipy.signal.get_window('hann', win_len_smooth + 2, fftbins=False)
        win /= np.sum(win)
        win = win.reshape(1, -1)
        chroma_smooth = scipy.ndimage.convolve(chroma_quant, win, mode='constant')
    else:
        chroma_smooth = chroma_quant
    # Final per-frame normalization (L2 or max-norm).
    if norm == 2:
        threshold = np.finfo(chroma_smooth.dtype).tiny
        chroma_norm = np.sqrt(np.sum(chroma_smooth ** 2, axis=0, keepdims=True))
        chroma_norm = np.maximum(chroma_norm, threshold)
        chroma_smooth = chroma_smooth / chroma_norm
    elif norm == np.inf:
        threshold = np.finfo(chroma_smooth.dtype).tiny
        chroma_norm = np.max(np.abs(chroma_smooth), axis=0, keepdims=True)
        chroma_norm = np.maximum(chroma_norm, threshold)
        chroma_smooth = chroma_smooth / chroma_norm
    return chroma_smooth
def _create_mel_filterbank(sr, n_fft, n_mels=128, fmin=0.0, fmax=None):
    """Create a Slaney-normalized triangular mel filterbank of shape (n_mels, 1 + n_fft//2)."""
    if fmax is None:
        fmax = sr / 2.0
    weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=np.float32)
    bin_freqs = np.fft.rfftfreq(n=n_fft, d=1.0 / sr)
    # n_mels + 2 band edges spaced evenly on the mel scale, mapped back to Hz.
    mel_pts = np.linspace(hz_to_mel(fmin), hz_to_mel(fmax), n_mels + 2)
    hz_pts = mel_to_hz(mel_pts)
    spacing = np.diff(hz_pts)
    ramps = np.subtract.outer(hz_pts, bin_freqs)
    for m in range(n_mels):
        # Triangle m rises from edge m to m+1, falls from m+1 to m+2.
        rising = -ramps[m] / spacing[m]
        falling = ramps[m + 2] / spacing[m + 1]
        weights[m] = np.maximum(0, np.minimum(rising, falling))
    # Slaney-style area normalization across each filter's bandwidth.
    norm = 2.0 / (hz_pts[2:n_mels + 2] - hz_pts[:n_mels])
    weights *= norm[:, np.newaxis]
    return weights
def _compute_mel_spectrogram(data, sr, n_fft=2048, hop_length=512, n_mels=128):
    """Compute a power mel spectrogram of a 1-D signal via a centered hann STFT."""
    window = scipy.signal.get_window('hann', n_fft, fftbins=True)
    if len(window) < n_fft:
        # Defensive centering pad; the call above already yields n_fft samples.
        left = int((n_fft - len(window)) // 2)
        window = np.pad(window, (left, int(n_fft - len(window) - left)), mode='constant')
    window = window.reshape((-1, 1))
    # Centered framing via zero-pad + stride tricks (no data copy).
    padded = np.pad(data, int(n_fft // 2), mode='constant')
    frame_count = 1 + (len(padded) - n_fft) // hop_length
    frames = np.lib.stride_tricks.as_strided(
        padded,
        shape=(n_fft, frame_count),
        strides=(padded.strides[0], padded.strides[0] * hop_length),
    )
    spectrum = scipy.fft.rfft(window * frames, axis=0).astype(np.complex64)
    power = np.abs(spectrum) ** 2
    mel_weights = _create_mel_filterbank(sr, n_fft, n_mels=n_mels, fmin=0.0, fmax=sr / 2.0)
    return np.dot(mel_weights, power).astype(np.float32)
def quick_tempo_estimate(audio_np, sr, start_bpm=120.0, std_bpm=1.0, hop_length=512):
    """Estimate tempo (BPM) of a 1-D signal via an autocorrelation tempogram."""
    if len(audio_np) < hop_length * 10:
        logging.warning("Audio too short for tempo estimation, returning default BPM of 120.0")
        return 120.0
    n_fft = 2048
    mel_power = _compute_mel_spectrogram(audio_np, sr, n_fft=n_fft, hop_length=hop_length, n_mels=128)
    mel_db = 10.0 * np.log10(np.maximum(1e-10, mel_power))
    # Spectral flux onset strength: positive first differences averaged over bands.
    lag = 1
    flux = np.maximum(0.0, mel_db[:, lag:] - mel_db[:, :-lag])
    strength = np.mean(flux, axis=0)
    # Left-pad to compensate for the diff lag plus the STFT centering delay,
    # then trim back to the original frame count.
    front = lag + n_fft // (2 * hop_length)
    envelope = np.pad(strength, (front, 0), mode='constant')[:mel_power.shape[1]]
    return estimate_tempo_from_onset(envelope, sr, hop_length, start_bpm, std_bpm, max_tempo=320.0)
def estimate_tempo_from_onset(onset_env, sr, hop_length, start_bpm=120.0, std_bpm=1.0, max_tempo=320.0):
"""Estimate tempo from onset strength envelope using autocorrelation tempogram."""
if len(onset_env) < 20:
return 120.0
ac_size = 8.0
win_length = int(np.round(ac_size * sr / hop_length))
win_length = min(win_length, len(onset_env))
pad_width = win_length // 2
onset_padded = np.pad(onset_env, (pad_width, pad_width), mode='linear_ramp', end_values=(0, 0))
n_frames = len(onset_env)
shape = (win_length, n_frames)
strides = (onset_padded.strides[0], onset_padded.strides[0])
frames = np.lib.stride_tricks.as_strided(onset_padded, shape=shape, strides=strides)
hann_window = scipy.signal.get_window('hann', win_length, fftbins=True)
windowed_frames = frames * hann_window[:, np.newaxis]
tempogram = np.zeros((win_length, n_frames))
for i in range(n_frames):
frame = windowed_frames[:, i]
n_pad = scipy.fft.next_fast_len(2 * len(frame) - 1)
fft_result = scipy.fft.rfft(frame, n=n_pad)
powspec = np.abs(fft_result) ** 2
ac = scipy.fft.irfft(powspec, n=n_pad)
tempogram[:, i] = ac[:win_length]
ac_max = np.max(np.abs(tempogram), axis=0)
mask = ac_max > 0
tempogram[:, mask] /= ac_max[mask]
tempogram_mean = np.mean(tempogram, axis=1)
tempogram_mean = np.maximum(tempogram_mean, 0)
bpms = np.zeros(win_length, dtype=np.float64)
bpms[0] = np.inf
bpms[1:] = 60.0 * sr / (hop_length * np.arange(1.0, win_length))
logprior = -0.5 * ((np.log2(bpms) - np.log2(start_bpm)) / std_bpm) ** 2
if max_tempo is not None:
max_idx = int(np.argmax(bpms < max_tempo))
if max_idx > 0:
logprior[:max_idx] = -np.inf
weighted = np.log1p(1e6 * tempogram_mean) + logprior
best_idx = int(np.argmax(weighted[1:])) + 1
tempo = bpms[best_idx]
return tempo
def detect_onset_peaks(onset_env, sr=22050, hop_length=512, pre_max=0.03, post_max=0.0,
                       pre_avg=0.10, post_avg=0.10, wait=0.03, delta=0.07):
    """Pick onset peaks from an onset-strength envelope.

    A frame is a peak when it is the maximum of its local window AND exceeds
    the local mean by `delta`; after each accepted peak, `wait` seconds are
    skipped. Returns the peak frame indices as int32.
    """
    # Normalize the envelope into [0, 1].
    env = onset_env - np.min(onset_env)
    peak_val = np.max(env)
    if peak_val > 0:
        env = env / peak_val
    # Convert the time-based window parameters to frame counts.
    max_before = int(pre_max * sr / hop_length)
    max_after = int(post_max * sr / hop_length) + 1
    avg_before = int(pre_avg * sr / hop_length)
    avg_after = int(post_avg * sr / hop_length) + 1
    skip = int(wait * sr / hop_length)
    is_peak = np.zeros(len(env), dtype=bool)
    # Frame 0 only looks forward (no history available).
    is_peak[0] = (env[0] >= np.max(env[:min(max_after, len(env))]))
    is_peak[0] &= (env[0] >= np.mean(env[:min(avg_after, len(env))]) + delta)
    idx = skip + 1 if is_peak[0] else 1
    while idx < len(env):
        window_max = np.max(env[max(0, idx - max_before):min(idx + max_after, len(env))])
        is_peak[idx] = (env[idx] == window_max)
        if not is_peak[idx]:
            idx += 1
            continue
        window_mean = np.mean(env[max(0, idx - avg_before):min(idx + avg_after, len(env))])
        is_peak[idx] &= (env[idx] >= window_mean + delta)
        if not is_peak[idx]:
            idx += 1
            continue
        # Accepted: enforce the refractory period before the next candidate.
        idx += skip + 1
    return np.flatnonzero(is_peak).astype(np.int32)
def track_beats(onset_env, tempo, sr, hop_length, tightness=100, trim=True):
    """Track beats using dynamic programming.

    Args:
        onset_env: Onset strength envelope (one value per frame).
        tempo: Tempo estimate in BPM used to set the expected beat period.
        sr: Sample rate in Hz.
        hop_length: Hop size in samples underlying the envelope.
        tightness: Penalty weight on deviations from the expected period.
        trim: If True, drop weak leading/trailing beats.

    Returns:
        int32 array of beat frame indices (possibly empty).
    """
    frame_rate = sr / hop_length
    frames_per_beat = np.round(frame_rate * 60.0 / tempo)
    # Degenerate tempo or envelope: no beats.
    if frames_per_beat <= 0 or len(onset_env) < 2:
        return np.array([], dtype=np.int32)
    # Normalize by the sample standard deviation (guarding silence).
    onset_std = np.std(onset_env, ddof=1)
    if onset_std > 0:
        onset_normalized = onset_env / onset_std
    else:
        onset_normalized = onset_env
    # Smooth the envelope with a gaussian-shaped window about one beat wide.
    window_range = np.arange(-frames_per_beat, frames_per_beat + 1)
    window = np.exp(-0.5 * (window_range * 32.0 / frames_per_beat) ** 2)
    localscore = scipy.signal.convolve(onset_normalized, window, mode='same')
    backlink = np.full(len(localscore), -1, dtype=np.int32)
    cumscore = np.zeros(len(localscore), dtype=np.float64)
    score_thresh = 0.01 * localscore.max()
    first_beat = True
    backlink[0] = -1
    cumscore[0] = localscore[0]
    fpb = int(frames_per_beat)
    # Forward DP pass: for each frame, find the best predecessor beat within
    # roughly half to two beat periods back, penalized by log-period deviation.
    for i in range(1, len(localscore)):
        score_i = localscore[i]
        best_score = -np.inf
        beat_location = -1
        search_start = int(i - np.round(fpb / 2.0))
        search_end = int(i - 2 * fpb - 1)
        for loc in range(search_start, search_end, -1):
            if loc < 0:
                break
            score = cumscore[loc] - tightness * (np.log(i - loc) - np.log(fpb)) ** 2
            if score > best_score:
                best_score = score
                beat_location = loc
        if beat_location >= 0:
            cumscore[i] = score_i + best_score
        else:
            cumscore[i] = score_i
        # Suppress backlinks until the first sufficiently strong frame so the
        # backtrace cannot start from leading silence.
        if first_beat and score_i < score_thresh:
            backlink[i] = -1
        else:
            backlink[i] = beat_location
            first_beat = False
    # Locate local maxima of the cumulative score to pick a backtrace tail.
    local_max_mask = np.zeros(len(cumscore), dtype=bool)
    local_max_mask[0] = False
    for i in range(1, len(cumscore) - 1):
        local_max_mask[i] = (cumscore[i] > cumscore[i-1]) and (cumscore[i] >= cumscore[i+1])
    if len(cumscore) > 1:
        local_max_mask[-1] = cumscore[-1] > cumscore[-2]
    if np.any(local_max_mask):
        # Tail = last local max at least half the median local-max score.
        median_max = np.median(cumscore[local_max_mask])
        threshold = 0.5 * median_max
        tail = -1
        for i in range(len(cumscore) - 1, -1, -1):
            if local_max_mask[i] and cumscore[i] >= threshold:
                tail = i
                break
    else:
        tail = len(cumscore) - 1
    # Backtrace from the tail; the visited set guards against backlink cycles.
    beats = np.zeros(len(localscore), dtype=bool)
    n = tail
    visited = set()
    while n >= 0 and n not in visited:
        beats[n] = True
        visited.add(n)
        n = backlink[n]
    if trim and np.any(beats):
        # Trim weak beats at both ends, thresholded at half the RMS of the
        # smoothed beat-frame scores.
        beat_positions = np.flatnonzero(beats)
        beat_localscores = localscore[beat_positions]
        w = np.hanning(5)
        smooth_boe_full = np.convolve(beat_localscores, w)
        # NOTE(review): this slice bounds by len(localscore) rather than the
        # number of beats -- verify against the reference trimming logic.
        smooth_boe = smooth_boe_full[len(w)//2 : len(localscore) + len(w)//2]
        threshold = 0.5 * np.sqrt(np.mean(smooth_boe ** 2))
        start_frame = 0
        while start_frame < len(localscore) and localscore[start_frame] <= threshold:
            beats[start_frame] = False
            start_frame += 1
        end_frame = len(localscore) - 1
        while end_frame >= 0 and localscore[end_frame] <= threshold:
            beats[end_frame] = False
            end_frame -= 1
    return np.flatnonzero(beats).astype(np.int32)
def compute_onset_envelope(mel_spec_db, n_fft=2048, hop_length=512):
    """Compute onset strength envelope from a log-mel spectrogram (dB).

    Half-wave rectified first-order spectral flux, averaged across mel bands,
    then left-padded by the diff lag plus the STFT centering offset and
    truncated back to the spectrogram's frame count.
    """
    lag = 1
    # Positive part of the frame-to-frame difference along time (spectral flux).
    flux = np.clip(np.diff(mel_spec_db, n=lag, axis=1), 0.0, None)
    strength = flux.mean(axis=0)
    # Shift right so envelope frames line up with the spectrogram frames.
    left_pad = lag + n_fft // (2 * hop_length)
    padded = np.concatenate([np.zeros(left_pad, dtype=strength.dtype), strength])
    return padded[: mel_spec_db.shape[1]]
def compute_mfcc(mel_spec_db, n_mfcc=20):
    """Compute MFCC features from a log-mel spectrogram (dB).

    Returns an array of shape (n_frames, n_mfcc) as float32.
    """
    # Type-II orthonormal DCT over the mel axis decorrelates the bands.
    cepstrum = scipy.fft.dct(mel_spec_db, type=2, norm='ortho', axis=0)
    # Keep the lowest n_mfcc coefficients, put time on the first axis.
    return np.transpose(cepstrum[:n_mfcc]).astype(np.float32)
def power_to_db(S, amin=1e-10, top_db=80.0, ref=1.0):
    """Convert a power spectrogram (amplitude squared) to decibel (dB) units.

    Values are clamped at ``amin`` before the log to avoid -inf, referenced
    against ``ref``, and — when ``top_db`` is not None — floored at
    ``max - top_db`` (dynamic-range compression).
    """
    power = np.asarray(S)
    db = 10.0 * np.log10(np.clip(power, amin, None))
    db = db - 10.0 * np.log10(np.maximum(amin, ref))
    if top_db is not None:
        # Floor everything more than top_db below the peak value.
        floor = db.max() - top_db
        db = np.where(db < floor, floor, db)
    return db
class WanDancerEncodeAudio(io.ComfyNode):
    """Extract per-frame audio features for the WanDancer video model.

    Builds a feature matrix of [onset envelope | 20 MFCCs | chroma |
    peak one-hot | beat one-hot] per audio frame, plus an fps string that
    is appended to the prompt. The processing order deliberately mirrors
    the reference pipeline, including its exact resampling and tempo
    seeding steps.
    """
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanDancerEncodeAudio",
            category="conditioning/video_models",
            inputs=[
                io.Audio.Input("audio"),
                io.Int.Input("video_frames", default=149, min=1, max=nodes.MAX_RESOLUTION, step=4),
                io.Float.Input("audio_inject_scale", default=1.0, min=0.0, max=10.0, step=0.01, tooltip="The scale for the audio features when injected into the video model."),
            ],
            outputs=[
                io.AudioEncoderOutput.Output(display_name="audio_encoder_output"),
                io.String.Output(display_name="fps_string", tooltip="The calculated fps based on the audio length and the number of video frames. Used in the prompt."),
            ],
        )
    @classmethod
    def execute(cls, video_frames, audio_inject_scale, audio) -> io.NodeOutput:
        """Compute the audio feature tensor and fps prompt string.

        Returns an audio_encoder_output dict ({"audio_feature", "fps",
        "audio_inject_scale"}) and the fps string for the prompt.
        """
        # First item of the batch; shape is (channels, samples).
        waveform = audio["waveform"][0]
        sample_rate = audio["sample_rate"]
        base_fps = 30
        hop_length = 512
        model_sr = 22050
        n_fft = 2048
        # start tempo from original audio (not the resampled one) to match the reference pipeline
        if waveform.shape[0] > 1:
            # Downmix multi-channel audio to mono.
            waveform = waveform.mean(dim=0, keepdim=False)
        start_bpm = quick_tempo_estimate(waveform.squeeze().cpu().numpy(), sample_rate, hop_length=hop_length)
        # resample to the sample rate used for feature extraction
        # (one hop per video frame: base_fps * hop_length samples per second)
        resample_sr = base_fps * hop_length
        waveform = torchaudio.functional.resample(waveform, sample_rate, resample_sr)
        waveform_np = waveform.cpu().numpy().squeeze()
        # NOTE(review): features below are computed with model_sr (22050) even
        # though the waveform was resampled to resample_sr (15360) — this
        # mismatch matches the reference pipeline; confirm before "fixing".
        mel_spec = _compute_mel_spectrogram(waveform_np, model_sr, n_fft, hop_length, n_mels=128)
        mel_spec_db = power_to_db(mel_spec, amin=1e-10, top_db=80.0, ref=1.0)
        envelope = compute_onset_envelope(mel_spec_db, n_fft, hop_length)
        mfcc = compute_mfcc(mel_spec_db, n_mfcc=20)
        chroma = compute_chroma_cens(y=waveform_np, sr=model_sr, hop_length=hop_length).T
        # detect peaks: one-hot vector marking onset peak frames
        peak_idxs = detect_onset_peaks(envelope, sr=model_sr, hop_length=hop_length)
        peak_onehot = np.zeros_like(envelope, dtype=np.float32)
        peak_onehot[peak_idxs] = 1.0
        # detect beats: seed the tracker with the tempo estimated from the original audio
        beat_tracking_tempo = estimate_tempo_from_onset(envelope, sr=model_sr, hop_length=hop_length, start_bpm=start_bpm)
        beat_idxs = track_beats(envelope, beat_tracking_tempo, model_sr, hop_length, tightness=100, trim=True)
        beat_onehot = np.zeros_like(envelope, dtype=np.float32)
        beat_onehot[beat_idxs] = 1.0
        # Feature layout per frame: [onset(1) | mfcc(20) | chroma | peak(1) | beat(1)].
        audio_feature = np.concatenate(
            [envelope[:, None], mfcc, chroma, peak_onehot[:, None], beat_onehot[:, None]],
            axis=-1,
        )
        audio_feature = torch.from_numpy(audio_feature).unsqueeze(0).to(comfy.model_management.intermediate_device())
        # Effective fps so that video_frames of video span the audio timeline:
        # divide base_fps by the rounded audio-frames-per-video-frame ratio.
        fps = float(base_fps / int(audio_feature.shape[1] / video_frames + 0.5))
        audio_encoder_output = {
            "audio_feature": audio_feature,
            "fps": fps,
            "audio_inject_scale": audio_inject_scale,
        }
        if int(fps + 0.5) != 30:
            fps_string = " 帧率是{:.4f}".format(fps) # "frame rate is" in Chinese, as it was in the original pipeline
        else:
            fps_string = ", 帧率是30fps。" # to match the reference pipeline when the fps is 30
        return io.NodeOutput(audio_encoder_output, fps_string)
class WanDancerVideo(io.ComfyNode):
    """Build WanDancer conditioning and an empty latent for video sampling.

    Optionally encodes start frames (with a keep/generate mask) into concat
    conditioning, and attaches CLIP-vision embeds and encoded audio features
    to both the positive and negative conditioning.
    """
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanDancerVideo",
            category="conditioning/video_models",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("width", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("height", default=832, min=16, max=nodes.MAX_RESOLUTION, step=16),
                io.Int.Input("length", default=149, min=1, max=nodes.MAX_RESOLUTION, step=4, tooltip="The number of frames in the generated video. Should stay 149 for WanDancer."),
                io.ClipVisionOutput.Input("clip_vision_output", optional=True, tooltip="The CLIP vision embeds for the first frame."),
                io.ClipVisionOutput.Input("clip_vision_output_ref", optional=True, tooltip="The CLIP vision embeds for the reference image."),
                io.Image.Input("start_image", optional=True, tooltip="The initial image(s) to be encoded, can be any number of frames."),
                io.Mask.Input("mask", optional=True, tooltip="Image conditioning mask for the start image(s). White is kept, black is generated. Used for the local generations."),
                io.AudioEncoderOutput.Input("audio_encoder_output", optional=True),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent", tooltip="Empty latent."),
            ],
        )
    @classmethod
    def execute(cls, positive, negative, vae, width, height, length, start_image=None, mask=None, clip_vision_output=None, clip_vision_output_ref=None, audio_encoder_output=None) -> io.NodeOutput:
        """Return (positive, negative, latent) with all conditioning attached."""
        # Empty latent: 16 channels, 4 pixel frames packed per latent frame,
        # 8x spatial downscale.
        latent = torch.zeros([1, 16, ((length - 1) // 4) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
        if start_image is not None:
            # Resize the provided frames, then pad to `length` with zero frames.
            start_image = comfy.utils.common_upscale(start_image[:length].movedim(-1, 1), width, height, "bilinear", "center").movedim(1, -1)
            image = torch.zeros((length, height, width, start_image.shape[-1]), device=start_image.device, dtype=start_image.dtype)
            image[:start_image.shape[0]] = start_image
            # Encode RGB only (drops a possible alpha channel).
            concat_latent_image = vae.encode(image[:, :, :, :3])
            if mask is None:
                # 0 marks latent frames covered by the start images (kept),
                # 1 marks latent frames to be generated.
                concat_mask = torch.ones((1, 1, latent.shape[2], concat_latent_image.shape[-2], concat_latent_image.shape[-1]), device=start_image.device, dtype=start_image.dtype)
                concat_mask[:, :, :((start_image.shape[0] - 1) // 4) + 1] = 0.0
            else:
                # Input mask uses white=keep (see tooltip); invert so 0=keep
                # to match the default path above.
                concat_mask = 1 - mask[:length].unsqueeze(0)
                concat_mask = comfy.utils.common_upscale(concat_mask, concat_latent_image.shape[-2], concat_latent_image.shape[-1], "nearest-exact", "disabled")
                # The first pixel frame maps alone to the first latent frame:
                # repeat its mask 4x so frames group evenly into latent steps.
                concat_mask = torch.cat([torch.repeat_interleave(concat_mask[:, 0:1], repeats=4, dim=1), concat_mask[:, 1:]], dim=1)
                # Pack every 4 consecutive frame masks into one latent step:
                # (1, T, h, w) -> (1, T//4, 4, h, w) -> (1, 4, T//4, h, w).
                # NOTE(review): assumes length % 4 == 1 (schema step=4, default
                # 149) so T+3 divides evenly by 4 — confirm for other lengths.
                concat_mask = concat_mask.view(1, concat_mask.shape[1] // 4, 4, concat_latent_image.shape[-2], concat_latent_image.shape[-1]).transpose(1, 2)
            positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": concat_latent_image, "concat_mask": concat_mask})
            negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": concat_latent_image, "concat_mask": concat_mask})
        if clip_vision_output is not None:
            positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output, "clip_vision_output_ref": clip_vision_output_ref})
            negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output, "clip_vision_output_ref": clip_vision_output_ref})
        if audio_encoder_output is not None:
            positive = node_helpers.conditioning_set_values(positive, {"audio_embed": audio_encoder_output["audio_feature"], "fps": audio_encoder_output["fps"], "audio_inject_scale": audio_encoder_output.get("audio_inject_scale", 1.0)})
            negative = node_helpers.conditioning_set_values(negative, {"audio_embed": audio_encoder_output["audio_feature"], "fps": audio_encoder_output["fps"], "audio_inject_scale": audio_encoder_output.get("audio_inject_scale", 1.0)})
        out_latent = {}
        out_latent["samples"] = latent
        return io.NodeOutput(positive, negative, out_latent)
class WanDancerPadKeyframes(io.ComfyNode):
    """Distribute keyframe images across one fixed-length video segment.

    Given the full keyframe stack and an audio track, places the keyframes
    belonging to segment `segment_index` at evenly spaced frame positions,
    returns a validity mask, and slices out the matching audio segment.
    """
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanDancerPadKeyframes",
            category="image/video",
            inputs=[
                io.Image.Input("images",),
                io.Int.Input("segment_length", default=149, min=1, max=10000, tooltip="Length of this segment (usually 149 frames)"),
                io.Int.Input("segment_index", default=0, min=0, max=100, tooltip="Which segment this is (0 for first, 1 for second, etc.)"),
                io.Audio.Input("audio", tooltip="Audio to calculate total output frames from and extract segment audio."),
            ],
            outputs=[
                io.Image.Output(display_name="keyframes_sequence", tooltip="Padded keyframe sequence"),
                io.Mask.Output(display_name="keyframes_mask", tooltip="Mask indicating valid frames"),
                io.Audio.Output(display_name="audio_segment", tooltip="Audio segment for this video segment"),
            ],
        )
    @classmethod
    def do_execute(cls, images, segment_length, segment_index, audio):
        """Build (keyframes, mask, audio_segment) for one segment.

        keyframes: (segment_length, H, W, C) tensor, zero except at keyframe
        positions; mask: (segment_length, H, W), 1 at those positions;
        audio_segment: audio dict sliced to this segment's time window.
        """
        B, H, W, C = images.shape
        fps = 30
        # calculate total frames
        audio_duration = audio["waveform"].shape[-1] / audio["sample_rate"]
        segment_duration = segment_length / fps
        # Audio shorter than `buffer` seconds yields zero segments.
        buffer = 0.2
        num_segments = int((audio_duration - buffer) / segment_duration) + 1 if audio_duration > buffer else 0
        total_frames = num_segments * segment_length
        mask = torch.zeros((segment_length, H, W), device=images.device, dtype=images.dtype)
        keyframes = torch.zeros((segment_length, H, W, C), dtype=images.dtype, device=images.device)
        # guard: with no audio or no images, nothing to place — leave keyframes/mask zeroed
        if total_frames > 0 and B > 0:
            # Global spacing: one keyframe every `frame_interval` output frames.
            frame_interval = float(total_frames) / B
            seg_num = int(math.ceil(total_frames / segment_length))
            is_last_segment = (segment_index == seg_num - 1)
            positions = []
            images_before_this_segment = 0
            # count images consumed by previous segments
            # NOTE(review): each segment also places an extra keyframe at its
            # end index but only `cnt` is accumulated here, so the end keyframe
            # of segment k doubles as the first keyframe of segment k+1 —
            # appears intentional for continuity across segments; confirm
            # against the reference pipeline.
            for seg_idx in range(segment_index):
                end_idx = (total_frames - segment_length * seg_idx - 1) if seg_idx == seg_num - 1 else (segment_length - 1)
                cnt = 0
                while cnt * frame_interval < end_idx - frame_interval:
                    cnt += 1
                images_before_this_segment += cnt
            # positions for current segment
            # The last segment's usable range is shortened to the remaining frames.
            end_index = (total_frames - segment_length * segment_index - 1) if is_last_segment else (segment_length - 1)
            cnt = 0
            while cnt * frame_interval < end_index - frame_interval:
                pos = int(math.ceil(frame_interval * cnt))
                positions.append((pos, images_before_this_segment + cnt))
                cnt += 1
            # Always place one keyframe exactly at the segment's end index.
            positions.append((end_index, images_before_this_segment + cnt))
            # Drop positions that fall past the available images or segment bounds.
            valid_positions = [(pos, idx) for pos, idx in positions if idx < B and pos < segment_length]
            if valid_positions:
                seg_positions, img_indices = zip(*valid_positions)
                seg_positions = torch.tensor(seg_positions, dtype=torch.long, device=images.device)
                img_indices = torch.tensor(img_indices, dtype=torch.long, device=images.device)
                mask[seg_positions] = 1
                keyframes[seg_positions] = images[img_indices]
        # extract audio segment
        segment_duration = segment_length / fps
        start_time = segment_index * segment_duration
        end_time = min(start_time + segment_duration, audio_duration)
        sample_rate = audio["sample_rate"]
        start_sample = int(start_time * sample_rate)
        end_sample = int(end_time * sample_rate)
        audio_segment_waveform = audio["waveform"][:, :, start_sample:end_sample]
        audio_segment = {
            "waveform": audio_segment_waveform,
            "sample_rate": sample_rate
        }
        return keyframes, mask, audio_segment
    @classmethod
    def execute(cls, images, segment_length, segment_index, audio=None) -> io.NodeOutput:
        """Node entry point; delegates to do_execute (shared with the list node)."""
        return io.NodeOutput(*cls.do_execute(images, segment_length, segment_index, audio))
class WanDancerPadKeyframesList(io.ComfyNode):
    """List variant of WanDancerPadKeyframes: emits every segment at once."""
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="WanDancerPadKeyframesList",
            category="image/video",
            inputs=[
                io.Image.Input("images"),
                io.Int.Input("segment_length", default=149, min=1, max=10000, tooltip="Length of each segment (usually 149 frames)"),
                io.Int.Input("num_segments", default=1, min=1, max=100, tooltip="How many padded segments to emit as lists."),
                io.Audio.Input("audio", tooltip="Audio to slice for each emitted segment."),
            ],
            outputs=[
                io.Image.Output(display_name="keyframes_sequence", tooltip="Padded keyframe sequences", is_output_list=True),
                io.Mask.Output(display_name="keyframes_mask", tooltip="Masks indicating valid frames", is_output_list=True),
                io.Audio.Output(display_name="audio_segment", tooltip="Audio segment for each video segment", is_output_list=True),
            ],
        )
    @classmethod
    def execute(cls, images, segment_length, num_segments, audio=None) -> io.NodeOutput:
        """Run WanDancerPadKeyframes for every segment and gather the outputs."""
        per_segment = []
        for segment_index in range(num_segments):
            per_segment.append(WanDancerPadKeyframes.do_execute(images, segment_length, segment_index, audio))
        # Transpose [(kf, mask, audio), ...] into three parallel output lists.
        frames_out, masks_out, audio_out = zip(*per_segment)
        return io.NodeOutput(list(frames_out), list(masks_out), list(audio_out))
class WanDancerExtension(ComfyExtension):
    """Extension that registers the WanDancer node set with ComfyUI."""
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        """Return the node classes provided by this extension."""
        node_classes: list[type[io.ComfyNode]] = [
            WanDancerVideo,
            WanDancerEncodeAudio,
            WanDancerPadKeyframes,
            WanDancerPadKeyframesList,
        ]
        return node_classes
async def comfy_entrypoint() -> WanDancerExtension:
    """Entry point ComfyUI calls to load the WanDancer extension."""
    extension = WanDancerExtension()
    return extension

View File

@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.20.1"
__version__ = "0.21.0"

View File

@ -2434,6 +2434,7 @@ async def init_builtin_extra_nodes():
"nodes_frame_interpolation.py",
"nodes_sam3.py",
"nodes_void.py",
"nodes_wandancer.py",
]
import_failed = []

View File

@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.20.1"
version = "0.21.0"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

View File

@ -1,5 +1,5 @@
comfyui-frontend-package==1.43.17
comfyui-workflow-templates==0.9.72
comfyui-frontend-package==1.43.18
comfyui-workflow-templates==0.9.73
comfyui-embedded-docs==0.4.4
torch
torchsde

View File

@ -124,9 +124,11 @@ class TestMathExpressionExecute:
with pytest.raises(Exception, match="not defined"):
self._exec("str(a)", a=42)
def test_boolean_result_raises(self):
with pytest.raises(ValueError, match="got bool"):
self._exec("a > b", a=5, b=3)
def test_boolean_result(self):
result = self._exec("a > b", a=5, b=3)
assert result[2] is True
result = self._exec("a > b", a=3, b=5)
assert result[2] is False
def test_empty_expression_raises(self):
with pytest.raises(ValueError, match="Expression cannot be empty"):