Merge branch 'master' into feature/implement-morphology-with-max-pool

Omri Marom 2026-04-27 22:25:03 +03:00 committed by GitHub
commit 25f8e58376
51 changed files with 34102 additions and 1160 deletions

.gitignore
View File

@ -21,6 +21,5 @@ venv*/
*.log
web_custom_versions/
.DS_Store
openapi.yaml
filtered-openapi.yaml
uv.lock

View File

@ -2,7 +2,6 @@
precision mediump float;
uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform int u_int0; // Blend mode
uniform int u_int1; // Color tint
uniform float u_float0; // Intensity
@ -75,7 +74,7 @@ void main() {
float t0 = threshold - 0.15;
float t1 = threshold + 0.15;
vec2 texelSize = 1.0 / u_resolution;
vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));
float radius2 = radius * radius;
float sampleScale = clamp(radius * 0.75, 0.35, 1.0);
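Note: textureSize(u_image0, 0) (GLSL ES 3.00) queries the bound texture's own dimensions at mip level 0, so the texel size no longer depends on the separately supplied u_resolution uniform matching the input image; the now-unused uniform is removed. The same substitution is applied in the blur, sharpen, and unsharp-mask shaders below.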

View File

@ -12,7 +12,6 @@ const int RADIAL_SAMPLES = 12;
const float RADIAL_STRENGTH = 0.0003;
uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform int u_int0; // Blur type (BLUR_GAUSSIAN, BLUR_BOX, BLUR_RADIAL)
uniform float u_float0; // Blur radius/amount
uniform int u_pass; // Pass index (0 = horizontal, 1 = vertical)
@ -25,7 +24,7 @@ float gaussian(float x, float sigma) {
}
void main() {
vec2 texelSize = 1.0 / u_resolution;
vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));
float radius = max(u_float0, 0.0);
// Radial (angular) blur - single pass, doesn't use separable

View File

@ -2,14 +2,13 @@
precision highp float;
uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform float u_float0; // strength [0.0 - 2.0] typical: 0.3-1.0
in vec2 v_texCoord;
layout(location = 0) out vec4 fragColor0;
void main() {
vec2 texel = 1.0 / u_resolution;
vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));
// Sample center and neighbors
vec4 center = texture(u_image0, v_texCoord);

View File

@ -2,7 +2,6 @@
precision highp float;
uniform sampler2D u_image0;
uniform vec2 u_resolution;
uniform float u_float0; // amount [0.0 - 3.0] typical: 0.5-1.5
uniform float u_float1; // radius [0.5 - 10.0] blur radius in pixels
uniform float u_float2; // threshold [0.0 - 0.1] min difference to sharpen
@ -19,7 +18,7 @@ float getLuminance(vec3 color) {
}
void main() {
vec2 texel = 1.0 / u_resolution;
vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));
float radius = max(u_float1, 0.5);
float amount = u_float0;
float threshold = u_float2;
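
For orientation, the unsharp-mask formula this shader applies, sketched in NumPy (the clip-based gate approximates the shader's smoothstep threshold; arrays are assumed to be H x W x 3 float images):

import numpy as np

LUMA = np.array([0.2126, 0.7152, 0.0722])

def unsharp(original, blurred, amount, threshold):
    # sharpened = original + (original - blurred) * amount, attenuated
    # where the luminance difference falls below the threshold.
    mask = original - blurred
    luma_delta = np.abs(mask @ LUMA)                          # (H, W)
    gate = np.clip(luma_delta / max(threshold, 1e-8), 0, 1)   # ~smoothstep(0, t, d)
    return np.clip(original + mask * amount * gate[..., None], 0.0, 1.0)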

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -160,7 +160,7 @@
},
"revision": 0,
"config": {},
"name": "local-Depth to Image (Z-Image-Turbo)",
"name": "Depth to Image (Z-Image-Turbo)",
"inputNode": {
"id": -10,
"bounding": [
@ -2482,4 +2482,4 @@
"VHS_KeepIntermediate": true
},
"version": 0.4
}
}

View File

@ -261,7 +261,7 @@
},
"revision": 0,
"config": {},
"name": "local-Depth to Video (LTX 2.0)",
"name": "Depth to Video (LTX 2.0)",
"inputNode": {
"id": -10,
"bounding": [
@ -5208,4 +5208,4 @@
"workflowRendererVersion": "LG"
},
"version": 0.4
}
}

File diff suppressed because it is too large

View File

@ -268,7 +268,7 @@
"Node name for S&R": "GLSLShader"
},
"widgets_values": [
"#version 300 es\nprecision mediump float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform int u_int0; // Blend mode\nuniform int u_int1; // Color tint\nuniform float u_float0; // Intensity\nuniform float u_float1; // Radius\nuniform float u_float2; // Threshold\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int BLEND_ADD = 0;\nconst int BLEND_SCREEN = 1;\nconst int BLEND_SOFT = 2;\nconst int BLEND_OVERLAY = 3;\nconst int BLEND_LIGHTEN = 4;\n\nconst float GOLDEN_ANGLE = 2.39996323;\nconst int MAX_SAMPLES = 48;\nconst vec3 LUMA = vec3(0.299, 0.587, 0.114);\n\nfloat hash(vec2 p) {\n p = fract(p * vec2(123.34, 456.21));\n p += dot(p, p + 45.32);\n return fract(p.x * p.y);\n}\n\nvec3 hexToRgb(int h) {\n return vec3(\n float((h >> 16) & 255),\n float((h >> 8) & 255),\n float(h & 255)\n ) * (1.0 / 255.0);\n}\n\nvec3 blend(vec3 base, vec3 glow, int mode) {\n if (mode == BLEND_SCREEN) {\n return 1.0 - (1.0 - base) * (1.0 - glow);\n }\n if (mode == BLEND_SOFT) {\n return mix(\n base - (1.0 - 2.0 * glow) * base * (1.0 - base),\n base + (2.0 * glow - 1.0) * (sqrt(base) - base),\n step(0.5, glow)\n );\n }\n if (mode == BLEND_OVERLAY) {\n return mix(\n 2.0 * base * glow,\n 1.0 - 2.0 * (1.0 - base) * (1.0 - glow),\n step(0.5, base)\n );\n }\n if (mode == BLEND_LIGHTEN) {\n return max(base, glow);\n }\n return base + glow;\n}\n\nvoid main() {\n vec4 original = texture(u_image0, v_texCoord);\n \n float intensity = u_float0 * 0.05;\n float radius = u_float1 * u_float1 * 0.012;\n \n if (intensity < 0.001 || radius < 0.1) {\n fragColor = original;\n return;\n }\n \n float threshold = 1.0 - u_float2 * 0.01;\n float t0 = threshold - 0.15;\n float t1 = threshold + 0.15;\n \n vec2 texelSize = 1.0 / u_resolution;\n float radius2 = radius * radius;\n \n float sampleScale = clamp(radius * 0.75, 0.35, 1.0);\n int samples = int(float(MAX_SAMPLES) * sampleScale);\n \n float noise = hash(gl_FragCoord.xy);\n float angleOffset = noise * GOLDEN_ANGLE;\n float radiusJitter = 0.85 + noise * 0.3;\n \n float ca = cos(GOLDEN_ANGLE);\n float sa = sin(GOLDEN_ANGLE);\n vec2 dir = vec2(cos(angleOffset), sin(angleOffset));\n \n vec3 glow = vec3(0.0);\n float totalWeight = 0.0;\n \n // Center tap\n float centerMask = smoothstep(t0, t1, dot(original.rgb, LUMA));\n glow += original.rgb * centerMask * 2.0;\n totalWeight += 2.0;\n \n for (int i = 1; i < MAX_SAMPLES; i++) {\n if (i >= samples) break;\n \n float fi = float(i);\n float dist = sqrt(fi / float(samples)) * radius * radiusJitter;\n \n vec2 offset = dir * dist * texelSize;\n vec3 c = texture(u_image0, v_texCoord + offset).rgb;\n float mask = smoothstep(t0, t1, dot(c, LUMA));\n \n float w = 1.0 - (dist * dist) / (radius2 * 1.5);\n w = max(w, 0.0);\n w *= w;\n \n glow += c * mask * w;\n totalWeight += w;\n \n dir = vec2(\n dir.x * ca - dir.y * sa,\n dir.x * sa + dir.y * ca\n );\n }\n \n glow *= intensity / max(totalWeight, 0.001);\n \n if (u_int1 > 0) {\n glow *= hexToRgb(u_int1);\n }\n \n vec3 result = blend(original.rgb, glow, u_int0);\n result += (noise - 0.5) * (1.0 / 255.0);\n \n fragColor = vec4(clamp(result, 0.0, 1.0), original.a);\n}",
"#version 300 es\nprecision mediump float;\n\nuniform sampler2D u_image0;\nuniform int u_int0; // Blend mode\nuniform int u_int1; // Color tint\nuniform float u_float0; // Intensity\nuniform float u_float1; // Radius\nuniform float u_float2; // Threshold\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int BLEND_ADD = 0;\nconst int BLEND_SCREEN = 1;\nconst int BLEND_SOFT = 2;\nconst int BLEND_OVERLAY = 3;\nconst int BLEND_LIGHTEN = 4;\n\nconst float GOLDEN_ANGLE = 2.39996323;\nconst int MAX_SAMPLES = 48;\nconst vec3 LUMA = vec3(0.299, 0.587, 0.114);\n\nfloat hash(vec2 p) {\n p = fract(p * vec2(123.34, 456.21));\n p += dot(p, p + 45.32);\n return fract(p.x * p.y);\n}\n\nvec3 hexToRgb(int h) {\n return vec3(\n float((h >> 16) & 255),\n float((h >> 8) & 255),\n float(h & 255)\n ) * (1.0 / 255.0);\n}\n\nvec3 blend(vec3 base, vec3 glow, int mode) {\n if (mode == BLEND_SCREEN) {\n return 1.0 - (1.0 - base) * (1.0 - glow);\n }\n if (mode == BLEND_SOFT) {\n return mix(\n base - (1.0 - 2.0 * glow) * base * (1.0 - base),\n base + (2.0 * glow - 1.0) * (sqrt(base) - base),\n step(0.5, glow)\n );\n }\n if (mode == BLEND_OVERLAY) {\n return mix(\n 2.0 * base * glow,\n 1.0 - 2.0 * (1.0 - base) * (1.0 - glow),\n step(0.5, base)\n );\n }\n if (mode == BLEND_LIGHTEN) {\n return max(base, glow);\n }\n return base + glow;\n}\n\nvoid main() {\n vec4 original = texture(u_image0, v_texCoord);\n \n float intensity = u_float0 * 0.05;\n float radius = u_float1 * u_float1 * 0.012;\n \n if (intensity < 0.001 || radius < 0.1) {\n fragColor = original;\n return;\n }\n \n float threshold = 1.0 - u_float2 * 0.01;\n float t0 = threshold - 0.15;\n float t1 = threshold + 0.15;\n \n vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));\n float radius2 = radius * radius;\n \n float sampleScale = clamp(radius * 0.75, 0.35, 1.0);\n int samples = int(float(MAX_SAMPLES) * sampleScale);\n \n float noise = hash(gl_FragCoord.xy);\n float angleOffset = noise * GOLDEN_ANGLE;\n float radiusJitter = 0.85 + noise * 0.3;\n \n float ca = cos(GOLDEN_ANGLE);\n float sa = sin(GOLDEN_ANGLE);\n vec2 dir = vec2(cos(angleOffset), sin(angleOffset));\n \n vec3 glow = vec3(0.0);\n float totalWeight = 0.0;\n \n // Center tap\n float centerMask = smoothstep(t0, t1, dot(original.rgb, LUMA));\n glow += original.rgb * centerMask * 2.0;\n totalWeight += 2.0;\n \n for (int i = 1; i < MAX_SAMPLES; i++) {\n if (i >= samples) break;\n \n float fi = float(i);\n float dist = sqrt(fi / float(samples)) * radius * radiusJitter;\n \n vec2 offset = dir * dist * texelSize;\n vec3 c = texture(u_image0, v_texCoord + offset).rgb;\n float mask = smoothstep(t0, t1, dot(c, LUMA));\n \n float w = 1.0 - (dist * dist) / (radius2 * 1.5);\n w = max(w, 0.0);\n w *= w;\n \n glow += c * mask * w;\n totalWeight += w;\n \n dir = vec2(\n dir.x * ca - dir.y * sa,\n dir.x * sa + dir.y * ca\n );\n }\n \n glow *= intensity / max(totalWeight, 0.001);\n \n if (u_int1 > 0) {\n glow *= hexToRgb(u_int1);\n }\n \n vec3 result = blend(original.rgb, glow, u_int0);\n result += (noise - 0.5) * (1.0 / 255.0);\n \n fragColor = vec4(clamp(result, 0.0, 1.0), original.a);\n}",
"from_input"
]
},

View File

@ -331,7 +331,7 @@
"Node name for S&R": "GLSLShader"
},
"widgets_values": [
"#version 300 es\n#pragma passes 2\nprecision highp float;\n\n// Blur type constants\nconst int BLUR_GAUSSIAN = 0;\nconst int BLUR_BOX = 1;\nconst int BLUR_RADIAL = 2;\n\n// Radial blur config\nconst int RADIAL_SAMPLES = 12;\nconst float RADIAL_STRENGTH = 0.0003;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform int u_int0; // Blur type (BLUR_GAUSSIAN, BLUR_BOX, BLUR_RADIAL)\nuniform float u_float0; // Blur radius/amount\nuniform int u_pass; // Pass index (0 = horizontal, 1 = vertical)\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nfloat gaussian(float x, float sigma) {\n return exp(-(x * x) / (2.0 * sigma * sigma));\n}\n\nvoid main() {\n vec2 texelSize = 1.0 / u_resolution;\n float radius = max(u_float0, 0.0);\n\n // Radial (angular) blur - single pass, doesn't use separable\n if (u_int0 == BLUR_RADIAL) {\n // Only execute on first pass\n if (u_pass > 0) {\n fragColor0 = texture(u_image0, v_texCoord);\n return;\n }\n\n vec2 center = vec2(0.5);\n vec2 dir = v_texCoord - center;\n float dist = length(dir);\n\n if (dist < 1e-4) {\n fragColor0 = texture(u_image0, v_texCoord);\n return;\n }\n\n vec4 sum = vec4(0.0);\n float totalWeight = 0.0;\n float angleStep = radius * RADIAL_STRENGTH;\n\n dir /= dist;\n\n float cosStep = cos(angleStep);\n float sinStep = sin(angleStep);\n\n float negAngle = -float(RADIAL_SAMPLES) * angleStep;\n vec2 rotDir = vec2(\n dir.x * cos(negAngle) - dir.y * sin(negAngle),\n dir.x * sin(negAngle) + dir.y * cos(negAngle)\n );\n\n for (int i = -RADIAL_SAMPLES; i <= RADIAL_SAMPLES; i++) {\n vec2 uv = center + rotDir * dist;\n float w = 1.0 - abs(float(i)) / float(RADIAL_SAMPLES);\n sum += texture(u_image0, uv) * w;\n totalWeight += w;\n\n rotDir = vec2(\n rotDir.x * cosStep - rotDir.y * sinStep,\n rotDir.x * sinStep + rotDir.y * cosStep\n );\n }\n\n fragColor0 = sum / max(totalWeight, 0.001);\n return;\n }\n\n // Separable Gaussian / Box blur\n int samples = int(ceil(radius));\n\n if (samples == 0) {\n fragColor0 = texture(u_image0, v_texCoord);\n return;\n }\n\n // Direction: pass 0 = horizontal, pass 1 = vertical\n vec2 dir = (u_pass == 0) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);\n\n vec4 color = vec4(0.0);\n float totalWeight = 0.0;\n float sigma = radius / 2.0;\n\n for (int i = -samples; i <= samples; i++) {\n vec2 offset = dir * float(i) * texelSize;\n vec4 sample_color = texture(u_image0, v_texCoord + offset);\n\n float weight;\n if (u_int0 == BLUR_GAUSSIAN) {\n weight = gaussian(float(i), sigma);\n } else {\n // BLUR_BOX\n weight = 1.0;\n }\n\n color += sample_color * weight;\n totalWeight += weight;\n }\n\n fragColor0 = color / totalWeight;\n}\n",
"#version 300 es\n#pragma passes 2\nprecision highp float;\n\n// Blur type constants\nconst int BLUR_GAUSSIAN = 0;\nconst int BLUR_BOX = 1;\nconst int BLUR_RADIAL = 2;\n\n// Radial blur config\nconst int RADIAL_SAMPLES = 12;\nconst float RADIAL_STRENGTH = 0.0003;\n\nuniform sampler2D u_image0;\nuniform int u_int0; // Blur type (BLUR_GAUSSIAN, BLUR_BOX, BLUR_RADIAL)\nuniform float u_float0; // Blur radius/amount\nuniform int u_pass; // Pass index (0 = horizontal, 1 = vertical)\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nfloat gaussian(float x, float sigma) {\n return exp(-(x * x) / (2.0 * sigma * sigma));\n}\n\nvoid main() {\n vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));\n float radius = max(u_float0, 0.0);\n\n // Radial (angular) blur - single pass, doesn't use separable\n if (u_int0 == BLUR_RADIAL) {\n // Only execute on first pass\n if (u_pass > 0) {\n fragColor0 = texture(u_image0, v_texCoord);\n return;\n }\n\n vec2 center = vec2(0.5);\n vec2 dir = v_texCoord - center;\n float dist = length(dir);\n\n if (dist < 1e-4) {\n fragColor0 = texture(u_image0, v_texCoord);\n return;\n }\n\n vec4 sum = vec4(0.0);\n float totalWeight = 0.0;\n float angleStep = radius * RADIAL_STRENGTH;\n\n dir /= dist;\n\n float cosStep = cos(angleStep);\n float sinStep = sin(angleStep);\n\n float negAngle = -float(RADIAL_SAMPLES) * angleStep;\n vec2 rotDir = vec2(\n dir.x * cos(negAngle) - dir.y * sin(negAngle),\n dir.x * sin(negAngle) + dir.y * cos(negAngle)\n );\n\n for (int i = -RADIAL_SAMPLES; i <= RADIAL_SAMPLES; i++) {\n vec2 uv = center + rotDir * dist;\n float w = 1.0 - abs(float(i)) / float(RADIAL_SAMPLES);\n sum += texture(u_image0, uv) * w;\n totalWeight += w;\n\n rotDir = vec2(\n rotDir.x * cosStep - rotDir.y * sinStep,\n rotDir.x * sinStep + rotDir.y * cosStep\n );\n }\n\n fragColor0 = sum / max(totalWeight, 0.001);\n return;\n }\n\n // Separable Gaussian / Box blur\n int samples = int(ceil(radius));\n\n if (samples == 0) {\n fragColor0 = texture(u_image0, v_texCoord);\n return;\n }\n\n // Direction: pass 0 = horizontal, pass 1 = vertical\n vec2 dir = (u_pass == 0) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);\n\n vec4 color = vec4(0.0);\n float totalWeight = 0.0;\n float sigma = radius / 2.0;\n\n for (int i = -samples; i <= samples; i++) {\n vec2 offset = dir * float(i) * texelSize;\n vec4 sample_color = texture(u_image0, v_texCoord + offset);\n\n float weight;\n if (u_int0 == BLUR_GAUSSIAN) {\n weight = gaussian(float(i), sigma);\n } else {\n // BLUR_BOX\n weight = 1.0;\n }\n\n color += sample_color * weight;\n totalWeight += weight;\n }\n\n fragColor0 = color / totalWeight;\n}\n",
"from_input"
]
}

File diff suppressed because it is too large

View File

@ -128,7 +128,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Edit (Flux.2 Klein 4B)",
"name": "Image Edit (Flux.2 Klein 4B)",
"inputNode": {
"id": -10,
"bounding": [
@ -1837,4 +1837,4 @@
}
},
"version": 0.4
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -124,7 +124,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Inpainting (Qwen-image)",
"name": "Image Inpainting (Qwen-image)",
"inputNode": {
"id": -10,
"bounding": [
@ -1923,4 +1923,4 @@
"workflowRendererVersion": "LG"
},
"version": 0.4
}
}

View File

@ -204,7 +204,7 @@
},
"revision": 0,
"config": {},
"name": "local-Image Outpainting (Qwen-Image)",
"name": "Image Outpainting (Qwen-Image)",
"inputNode": {
"id": -10,
"bounding": [
@ -2749,4 +2749,4 @@
}
},
"version": 0.4
}
}

View File

@ -1,15 +1,14 @@
{
"id": "1a761372-7c82-4016-b9bf-fa285967e1e9",
"revision": 0,
"last_node_id": 83,
"last_node_id": 176,
"last_link_id": 0,
"nodes": [
{
"id": 83,
"type": "f754a936-daaf-4b6e-9658-41fdc54d301d",
"id": 176,
"type": "2d2e3c8e-53b3-4618-be52-6d1d99382f0e",
"pos": [
61.999827823554256,
153.3332507624185
-1150,
200
],
"size": [
400,
@ -56,6 +55,38 @@
"name": "layers"
},
"link": null
},
{
"name": "seed",
"type": "INT",
"widget": {
"name": "seed"
},
"link": null
},
{
"name": "unet_name",
"type": "COMBO",
"widget": {
"name": "unet_name"
},
"link": null
},
{
"name": "clip_name",
"type": "COMBO",
"widget": {
"name": "clip_name"
},
"link": null
},
{
"name": "vae_name",
"type": "COMBO",
"widget": {
"name": "vae_name"
},
"link": null
}
],
"outputs": [
@ -66,28 +97,41 @@
"links": []
}
],
"title": "Image to Layers (Qwen-Image-Layered)",
"properties": {
"proxyWidgets": [
[
"-1",
"6",
"text"
],
[
"-1",
"3",
"steps"
],
[
"-1",
"3",
"cfg"
],
[
"-1",
"83",
"layers"
],
[
"3",
"seed"
],
[
"37",
"unet_name"
],
[
"38",
"clip_name"
],
[
"39",
"vae_name"
],
[
"3",
"control_after_generate"
@ -95,6 +139,11 @@
],
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -103,25 +152,20 @@
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": [
"",
20,
2.5,
2
]
"widgets_values": []
}
],
"links": [],
"groups": [],
"version": 0.4,
"definitions": {
"subgraphs": [
{
"id": "f754a936-daaf-4b6e-9658-41fdc54d301d",
"id": "2d2e3c8e-53b3-4618-be52-6d1d99382f0e",
"version": 1,
"state": {
"lastGroupId": 3,
"lastNodeId": 83,
"lastLinkId": 159,
"lastGroupId": 8,
"lastNodeId": 176,
"lastLinkId": 380,
"lastRerouteId": 0
},
"revision": 0,
@ -130,10 +174,10 @@
"inputNode": {
"id": -10,
"bounding": [
-510,
523,
-720,
720,
120,
140
220
]
},
"outputNode": {
@ -156,8 +200,8 @@
],
"localized_name": "image",
"pos": [
-410,
543
-620,
740
]
},
{
@ -168,8 +212,8 @@
150
],
"pos": [
-410,
563
-620,
760
]
},
{
@ -180,8 +224,8 @@
153
],
"pos": [
-410,
583
-620,
780
]
},
{
@ -192,8 +236,8 @@
154
],
"pos": [
-410,
603
-620,
800
]
},
{
@ -204,8 +248,56 @@
159
],
"pos": [
-410,
623
-620,
820
]
},
{
"id": "9f76338b-f4ca-4bb3-b61a-57b3f233061e",
"name": "seed",
"type": "INT",
"linkIds": [
377
],
"pos": [
-620,
840
]
},
{
"id": "8d0422d5-5eee-4f7e-9817-dc613cc62eca",
"name": "unet_name",
"type": "COMBO",
"linkIds": [
378
],
"pos": [
-620,
860
]
},
{
"id": "552eece2-a735-4d00-ae78-ded454622bc1",
"name": "clip_name",
"type": "COMBO",
"linkIds": [
379
],
"pos": [
-620,
880
]
},
{
"id": "1e6d141c-d0f9-4a2b-895c-b6780e57cfa0",
"name": "vae_name",
"type": "COMBO",
"linkIds": [
380
],
"pos": [
-620,
900
]
}
],
@ -231,14 +323,14 @@
"type": "CLIPLoader",
"pos": [
-320,
310
360
],
"size": [
346.7470703125,
106
350,
150
],
"flags": {},
"order": 0,
"order": 5,
"mode": 0,
"inputs": [
{
@ -248,7 +340,7 @@
"widget": {
"name": "clip_name"
},
"link": null
"link": 379
},
{
"localized_name": "type",
@ -283,9 +375,14 @@
}
],
"properties": {
"Node name for S&R": "CLIPLoader",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "CLIPLoader",
"models": [
{
"name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
@ -312,14 +409,14 @@
"type": "VAELoader",
"pos": [
-320,
460
580
],
"size": [
346.7470703125,
58
350,
110
],
"flags": {},
"order": 1,
"order": 6,
"mode": 0,
"inputs": [
{
@ -329,7 +426,7 @@
"widget": {
"name": "vae_name"
},
"link": null
"link": 380
}
],
"outputs": [
@ -345,9 +442,14 @@
}
],
"properties": {
"Node name for S&R": "VAELoader",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "VAELoader",
"models": [
{
"name": "qwen_image_layered_vae.safetensors",
@ -375,11 +477,11 @@
420
],
"size": [
425.27801513671875,
180.6060791015625
430,
190
],
"flags": {},
"order": 3,
"order": 2,
"mode": 0,
"inputs": [
{
@ -411,9 +513,14 @@
],
"title": "CLIP Text Encode (Negative Prompt)",
"properties": {
"Node name for S&R": "CLIPTextEncode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "CLIPTextEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -432,12 +539,12 @@
"id": 70,
"type": "ReferenceLatent",
"pos": [
330,
670
140,
700
],
"size": [
204.1666717529297,
46
210,
50
],
"flags": {
"collapsed": true
@ -470,9 +577,14 @@
}
],
"properties": {
"Node name for S&R": "ReferenceLatent",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "ReferenceLatent",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -480,19 +592,18 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 69,
"type": "ReferenceLatent",
"pos": [
330,
710
160,
820
],
"size": [
204.1666717529297,
46
210,
50
],
"flags": {
"collapsed": true
@ -525,9 +636,14 @@
}
],
"properties": {
"Node name for S&R": "ReferenceLatent",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "ReferenceLatent",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -535,8 +651,7 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 66,
@ -547,10 +662,10 @@
],
"size": [
270,
58
110
],
"flags": {},
"order": 4,
"order": 7,
"mode": 0,
"inputs": [
{
@ -580,9 +695,14 @@
}
],
"properties": {
"Node name for S&R": "ModelSamplingAuraFlow",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "ModelSamplingAuraFlow",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -600,11 +720,11 @@
"type": "LatentCutToBatch",
"pos": [
830,
160
140
],
"size": [
270,
82
140
],
"flags": {},
"order": 11,
@ -646,9 +766,14 @@
}
],
"properties": {
"Node name for S&R": "LatentCutToBatch",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "LatentCutToBatch",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -666,12 +791,12 @@
"id": 71,
"type": "VAEEncode",
"pos": [
100,
690
-280,
780
],
"size": [
140,
46
230,
100
],
"flags": {
"collapsed": false
@ -704,9 +829,14 @@
}
],
"properties": {
"Node name for S&R": "VAEEncode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "VAEEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -714,24 +844,23 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
850,
310
370
],
"size": [
210,
46
50
],
"flags": {
"collapsed": true
},
"order": 7,
"order": 3,
"mode": 0,
"inputs": [
{
@ -759,9 +888,14 @@
}
],
"properties": {
"Node name for S&R": "VAEDecode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "VAEDecode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -769,8 +903,7 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 6,
@ -780,11 +913,11 @@
180
],
"size": [
422.84503173828125,
164.31304931640625
430,
170
],
"flags": {},
"order": 6,
"order": 1,
"mode": 0,
"inputs": [
{
@ -816,9 +949,14 @@
],
"title": "CLIP Text Encode (Positive Prompt)",
"properties": {
"Node name for S&R": "CLIPTextEncode",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "CLIPTextEncode",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -838,14 +976,14 @@
"type": "KSampler",
"pos": [
530,
280
340
],
"size": [
270,
400
],
"flags": {},
"order": 5,
"order": 0,
"mode": 0,
"inputs": [
{
@ -879,7 +1017,7 @@
"widget": {
"name": "seed"
},
"link": null
"link": 377
},
{
"localized_name": "steps",
@ -939,9 +1077,14 @@
}
],
"properties": {
"Node name for S&R": "KSampler",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "KSampler",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -964,12 +1107,12 @@
"id": 78,
"type": "GetImageSize",
"pos": [
80,
790
-280,
930
],
"size": [
210,
136
230,
140
],
"flags": {},
"order": 12,
@ -1007,9 +1150,14 @@
}
],
"properties": {
"Node name for S&R": "GetImageSize",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "GetImageSize",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -1017,23 +1165,23 @@
"secondTabText": "Send Back",
"secondTabOffset": 80,
"secondTabWidth": 65
},
"widgets_values": []
}
},
{
"id": 83,
"type": "EmptyQwenImageLayeredLatentImage",
"pos": [
320,
790
-280,
1120
],
"size": [
330.9341796875,
130
340,
200
],
"flags": {},
"order": 13,
"mode": 0,
"showAdvanced": true,
"inputs": [
{
"localized_name": "width",
@ -1083,9 +1231,14 @@
}
],
"properties": {
"Node name for S&R": "EmptyQwenImageLayeredLatentImage",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "EmptyQwenImageLayeredLatentImage",
"enableTabs": false,
"tabWidth": 65,
"tabXOffset": 10,
@ -1109,11 +1262,11 @@
180
],
"size": [
346.7470703125,
82
350,
110
],
"flags": {},
"order": 2,
"order": 4,
"mode": 0,
"inputs": [
{
@ -1123,7 +1276,7 @@
"widget": {
"name": "unet_name"
},
"link": null
"link": 378
},
{
"localized_name": "weight_dtype",
@ -1147,9 +1300,14 @@
}
],
"properties": {
"Node name for S&R": "UNETLoader",
"cnr_id": "comfy-core",
"ver": "0.5.1",
"ue_properties": {
"widget_ue_connectable": {},
"input_ue_unconnectable": {},
"version": "7.7"
},
"Node name for S&R": "UNETLoader",
"models": [
{
"name": "qwen_image_layered_bf16.safetensors",
@ -1191,8 +1349,8 @@
"bounding": [
-330,
110,
366.7470703125,
421.6
370,
610
],
"color": "#3f789e",
"font_size": 24,
@ -1391,6 +1549,38 @@
"target_id": 83,
"target_slot": 2,
"type": "INT"
},
{
"id": 377,
"origin_id": -10,
"origin_slot": 5,
"target_id": 3,
"target_slot": 4,
"type": "INT"
},
{
"id": 378,
"origin_id": -10,
"origin_slot": 6,
"target_id": 37,
"target_slot": 0,
"type": "COMBO"
},
{
"id": 379,
"origin_id": -10,
"origin_slot": 7,
"target_id": 38,
"target_slot": 0,
"type": "COMBO"
},
{
"id": 380,
"origin_id": -10,
"origin_slot": 8,
"target_id": 39,
"target_slot": 0,
"type": "COMBO"
}
],
"extra": {
@ -1400,7 +1590,6 @@
}
]
},
"config": {},
"extra": {
"ds": {
"scale": 1.14,
@ -1409,7 +1598,6 @@
6.855893974423647
]
},
"workflowRendererVersion": "LG"
},
"version": 0.4
}
"ue_links": []
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -267,7 +267,7 @@
"Node name for S&R": "GLSLShader"
},
"widgets_values": [
"#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform float u_float0; // strength [0.0 2.0] typical: 0.31.0\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n vec2 texel = 1.0 / u_resolution;\n \n // Sample center and neighbors\n vec4 center = texture(u_image0, v_texCoord);\n vec4 top = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));\n vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0, texel.y));\n vec4 left = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));\n vec4 right = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));\n \n // Edge enhancement (Laplacian)\n vec4 edges = center * 4.0 - top - bottom - left - right;\n \n // Add edges back scaled by strength\n vec4 sharpened = center + edges * u_float0;\n \n fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);\n}",
"#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // strength [0.0 2.0] typical: 0.31.0\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));\n \n // Sample center and neighbors\n vec4 center = texture(u_image0, v_texCoord);\n vec4 top = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));\n vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0, texel.y));\n vec4 left = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));\n vec4 right = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));\n \n // Edge enhancement (Laplacian)\n vec4 edges = center * 4.0 - top - bottom - left - right;\n \n // Add edges back scaled by strength\n vec4 sharpened = center + edges * u_float0;\n \n fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);\n}",
"from_input"
]
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -383,7 +383,7 @@
"Node name for S&R": "GLSLShader"
},
"widgets_values": [
"#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform float u_float0; // amount [0.0 - 3.0] typical: 0.5-1.5\nuniform float u_float1; // radius [0.5 - 10.0] blur radius in pixels\nuniform float u_float2; // threshold [0.0 - 0.1] min difference to sharpen\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nfloat gaussian(float x, float sigma) {\n return exp(-(x * x) / (2.0 * sigma * sigma));\n}\n\nfloat getLuminance(vec3 color) {\n return dot(color, vec3(0.2126, 0.7152, 0.0722));\n}\n\nvoid main() {\n vec2 texel = 1.0 / u_resolution;\n float radius = max(u_float1, 0.5);\n float amount = u_float0;\n float threshold = u_float2;\n\n vec4 original = texture(u_image0, v_texCoord);\n\n // Gaussian blur for the \"unsharp\" mask\n int samples = int(ceil(radius));\n float sigma = radius / 2.0;\n\n vec4 blurred = vec4(0.0);\n float totalWeight = 0.0;\n\n for (int x = -samples; x <= samples; x++) {\n for (int y = -samples; y <= samples; y++) {\n vec2 offset = vec2(float(x), float(y)) * texel;\n vec4 sample_color = texture(u_image0, v_texCoord + offset);\n\n float dist = length(vec2(float(x), float(y)));\n float weight = gaussian(dist, sigma);\n blurred += sample_color * weight;\n totalWeight += weight;\n }\n }\n blurred /= totalWeight;\n\n // Unsharp mask = original - blurred\n vec3 mask = original.rgb - blurred.rgb;\n\n // Luminance-based threshold with smooth falloff\n float lumaDelta = abs(getLuminance(original.rgb) - getLuminance(blurred.rgb));\n float thresholdScale = smoothstep(0.0, threshold, lumaDelta);\n mask *= thresholdScale;\n\n // Sharpen: original + mask * amount\n vec3 sharpened = original.rgb + mask * amount;\n\n fragColor0 = vec4(clamp(sharpened, 0.0, 1.0), original.a);\n}\n",
"#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // amount [0.0 - 3.0] typical: 0.5-1.5\nuniform float u_float1; // radius [0.5 - 10.0] blur radius in pixels\nuniform float u_float2; // threshold [0.0 - 0.1] min difference to sharpen\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nfloat gaussian(float x, float sigma) {\n return exp(-(x * x) / (2.0 * sigma * sigma));\n}\n\nfloat getLuminance(vec3 color) {\n return dot(color, vec3(0.2126, 0.7152, 0.0722));\n}\n\nvoid main() {\n vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));\n float radius = max(u_float1, 0.5);\n float amount = u_float0;\n float threshold = u_float2;\n\n vec4 original = texture(u_image0, v_texCoord);\n\n // Gaussian blur for the \"unsharp\" mask\n int samples = int(ceil(radius));\n float sigma = radius / 2.0;\n\n vec4 blurred = vec4(0.0);\n float totalWeight = 0.0;\n\n for (int x = -samples; x <= samples; x++) {\n for (int y = -samples; y <= samples; y++) {\n vec2 offset = vec2(float(x), float(y)) * texel;\n vec4 sample_color = texture(u_image0, v_texCoord + offset);\n\n float dist = length(vec2(float(x), float(y)));\n float weight = gaussian(dist, sigma);\n blurred += sample_color * weight;\n totalWeight += weight;\n }\n }\n blurred /= totalWeight;\n\n // Unsharp mask = original - blurred\n vec3 mask = original.rgb - blurred.rgb;\n\n // Luminance-based threshold with smooth falloff\n float lumaDelta = abs(getLuminance(original.rgb) - getLuminance(blurred.rgb));\n float thresholdScale = smoothstep(0.0, threshold, lumaDelta);\n mask *= thresholdScale;\n\n // Sharpen: original + mask * amount\n vec3 sharpened = original.rgb + mask * amount;\n\n fragColor0 = vec4(clamp(sharpened, 0.0, 1.0), original.a);\n}\n",
"from_input"
]
}

View File

@ -54,7 +54,7 @@ class SplitMHA(nn.Module):
if mask is not None and mask.ndim == 2:
mask = mask[:, None, None, :] # [B, T] -> [B, 1, 1, T] for SDPA broadcast
dtype = q.dtype # manual_cast may produce mixed dtypes
out = optimized_attention(q, k.to(dtype), v.to(dtype), self.num_heads, mask=mask)
out = optimized_attention(q, k.to(dtype), v.to(dtype), self.num_heads, mask=mask, low_precision_attention=False)
return self.out_proj(out)
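Note: this call site, like the SAM attention modules in the following files, now passes low_precision_attention=False, opting out of any reduced-precision attention fast path (as the flag name suggests) for these projections.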

View File

@ -40,7 +40,7 @@ class SAMAttention(nn.Module):
q = self.q_proj(q)
k = self.k_proj(k)
v = self.v_proj(v)
return self.out_proj(optimized_attention(q, k, v, self.num_heads))
return self.out_proj(optimized_attention(q, k, v, self.num_heads, low_precision_attention=False))
class TwoWayAttentionBlock(nn.Module):
@ -179,7 +179,7 @@ class Attention(nn.Module):
q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(dim=0)
if self.use_rope and freqs_cis is not None:
q, k = apply_rope(q, k, freqs_cis)
return self.proj(optimized_attention(q, k, v, self.num_heads, skip_reshape=True))
return self.proj(optimized_attention(q, k, v, self.num_heads, skip_reshape=True, low_precision_attention=False))
class Block(nn.Module):

View File

@ -364,7 +364,7 @@ class SplitAttn(nn.Module):
v = self.v_proj(v)
if rope is not None:
q, k = apply_rope_memory(q, k, rope, self.num_heads, num_k_exclude_rope)
out = optimized_attention(q, k, v, self.num_heads)
out = optimized_attention(q, k, v, self.num_heads, low_precision_attention=False)
return self.out_proj(out)
@ -657,7 +657,7 @@ class DecoupledMemoryAttnLayer(nn.Module):
v = self.self_attn_v_proj(normed)
if rope is not None:
q, k = apply_rope_memory(q, k, rope, self.num_heads, 0)
x = x + self.self_attn_out_proj(optimized_attention(q, k, v, self.num_heads))
x = x + self.self_attn_out_proj(optimized_attention(q, k, v, self.num_heads, low_precision_attention=False))
# Decoupled cross-attention: fuse image and memory projections
normed = self.norm2(x)
@ -668,7 +668,7 @@ class DecoupledMemoryAttnLayer(nn.Module):
v = self.cross_attn_v_proj(memory)
if rope is not None:
q, k = apply_rope_memory(q, k, rope, self.num_heads, num_k_exclude_rope)
x = x + self.cross_attn_out_proj(optimized_attention(q, k, v, self.num_heads))
x = x + self.cross_attn_out_proj(optimized_attention(q, k, v, self.num_heads, low_precision_attention=False))
# FFN
x = x + self.linear2(F.gelu(self.linear1(self.norm3(x))))

View File

@ -9,6 +9,7 @@ from comfy_api.latest._input import (
CurveInput,
MonotoneCubicCurve,
LinearCurve,
RangeInput,
)
__all__ = [
@ -21,4 +22,5 @@ __all__ = [
"CurveInput",
"MonotoneCubicCurve",
"LinearCurve",
"RangeInput",
]

View File

@ -1,5 +1,6 @@
from .basic_types import ImageInput, AudioInput, MaskInput, LatentInput
from .curve_types import CurvePoint, CurveInput, MonotoneCubicCurve, LinearCurve
from .range_types import RangeInput
from .video_types import VideoInput
__all__ = [
@ -12,4 +13,5 @@ __all__ = [
"CurveInput",
"MonotoneCubicCurve",
"LinearCurve",
"RangeInput",
]

View File

@ -0,0 +1,70 @@
from __future__ import annotations
import logging
import math
import numpy as np
logger = logging.getLogger(__name__)
class RangeInput:
"""Represents a levels/range adjustment: input range [min, max] with
optional midpoint (gamma control).
Generates a 1D LUT identical to GIMP's levels mapping:
1. Normalize input to [0, 1] using [min, max]
2. Apply gamma correction: pow(value, 1/gamma)
3. Clamp to [0, 1]
The midpoint field is a position in [0, 1] representing where the
midtone falls within [min, max]. It maps to gamma via:
gamma = -log2(midpoint)
So midpoint=0.5 → gamma=1.0 (linear).
"""
def __init__(self, min_val: float, max_val: float, midpoint: float | None = None):
self.min_val = min_val
self.max_val = max_val
self.midpoint = midpoint
@staticmethod
def from_raw(data) -> RangeInput:
if isinstance(data, RangeInput):
return data
if isinstance(data, dict):
return RangeInput(
min_val=float(data.get("min", 0.0)),
max_val=float(data.get("max", 1.0)),
midpoint=float(data["midpoint"]) if data.get("midpoint") is not None else None,
)
raise TypeError(f"Cannot convert {type(data)} to RangeInput")
def to_lut(self, size: int = 256) -> np.ndarray:
"""Generate a float64 lookup table mapping [0, 1] input through this
levels adjustment.
The LUT maps normalized input values (0..1) to output values (0..1),
matching the GIMP levels formula.
"""
xs = np.linspace(0.0, 1.0, size, dtype=np.float64)
in_range = self.max_val - self.min_val
if abs(in_range) < 1e-10:
return np.where(xs >= self.min_val, 1.0, 0.0).astype(np.float64)
# Normalize: map [min, max] → [0, 1]
result = (xs - self.min_val) / in_range
result = np.clip(result, 0.0, 1.0)
# Gamma correction from midpoint
if self.midpoint is not None and self.midpoint > 0 and self.midpoint != 0.5:
gamma = max(-math.log2(self.midpoint), 0.001)
inv_gamma = 1.0 / gamma
mask = result > 0
result[mask] = np.power(result[mask], inv_gamma)
return result
def __repr__(self) -> str:
mid = f", midpoint={self.midpoint}" if self.midpoint is not None else ""
return f"RangeInput(min={self.min_val}, max={self.max_val}{mid})"

View File

@ -240,19 +240,34 @@ class VideoFromFile(VideoInput):
start_time = self.__start_time
# Get video frames
frames = []
alphas = None
start_pts = int(start_time / video_stream.time_base)
end_pts = int((start_time + self.__duration) / video_stream.time_base)
container.seek(start_pts, stream=video_stream)
image_format = 'gbrpf32le'
for frame in container.decode(video_stream):
if alphas is None:
for comp in frame.format.components:
if comp.is_alpha:
alphas = []
image_format = 'gbrapf32le'
break
if frame.pts < start_pts:
continue
if self.__duration and frame.pts >= end_pts:
break
img = frame.to_ndarray(format='rgb24') # shape: (H, W, 3)
img = torch.from_numpy(img) / 255.0 # shape: (H, W, 3)
frames.append(img)
images = torch.stack(frames) if len(frames) > 0 else torch.zeros(0, 3, 0, 0)
img = frame.to_ndarray(format=image_format) # shape: (H, W, 3), or (H, W, 4) with alpha
if alphas is None:
frames.append(torch.from_numpy(img))
else:
frames.append(torch.from_numpy(img[..., :-1]))
alphas.append(torch.from_numpy(img[..., -1:]))
images = torch.stack(frames) if len(frames) > 0 else torch.zeros(0, 0, 0, 3)
if alphas is not None:
alphas = torch.stack(alphas) if len(alphas) > 0 else torch.zeros(0, 0, 0, 1)
# Get frame rate
frame_rate = Fraction(video_stream.average_rate) if video_stream.average_rate else Fraction(1)
@ -295,7 +310,7 @@ class VideoFromFile(VideoInput):
})
metadata = container.metadata
return VideoComponents(images=images, audio=audio, frame_rate=frame_rate, metadata=metadata)
return VideoComponents(images=images, alpha=alphas, audio=audio, frame_rate=frame_rate, metadata=metadata)
def get_components(self) -> VideoComponents:
if isinstance(self.__file, io.BytesIO):
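
The alpha handling above hinges on PyAV exposing per-component format metadata; a standalone sketch of the detection step (the input path is hypothetical):

import av

container = av.open("clip_with_alpha.mov")  # hypothetical file
stream = container.streams.video[0]
frame = next(container.decode(stream))
# Pick a planar float format with an alpha plane only when the decoded
# frame actually reports an alpha component.
has_alpha = any(comp.is_alpha for comp in frame.format.components)
image_format = "gbrapf32le" if has_alpha else "gbrpf32le"
arr = frame.to_ndarray(format=image_format)  # (H, W, 4) with alpha, else (H, W, 3)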

View File

@ -1266,6 +1266,43 @@ class Histogram(ComfyTypeIO):
Type = list[int]
@comfytype(io_type="RANGE")
class Range(ComfyTypeIO):
from comfy_api.input import RangeInput
if TYPE_CHECKING:
Type = RangeInput
class Input(WidgetInput):
def __init__(self, id: str, display_name: str=None, optional=False, tooltip: str=None,
socketless: bool=True, default: dict=None,
display: str=None,
gradient_stops: list=None,
show_midpoint: bool=None,
midpoint_scale: str=None,
value_min: float=None,
value_max: float=None,
advanced: bool=None):
super().__init__(id, display_name, optional, tooltip, None, default, socketless, None, None, None, None, advanced)
if default is None:
self.default = {"min": 0.0, "max": 1.0}
self.display = display
self.gradient_stops = gradient_stops
self.show_midpoint = show_midpoint
self.midpoint_scale = midpoint_scale
self.value_min = value_min
self.value_max = value_max
def as_dict(self):
return super().as_dict() | prune_dict({
"display": self.display,
"gradient_stops": self.gradient_stops,
"show_midpoint": self.show_midpoint,
"midpoint_scale": self.midpoint_scale,
"value_min": self.value_min,
"value_max": self.value_max,
})
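
For illustration, a node schema could declare the new widget roughly as follows (a hypothetical sketch; no node in this diff is shown using it, and the id, default, and tooltip are illustrative):

# Inside a node's define_schema() inputs list:
IO.Range.Input(
    "levels",                                           # hypothetical widget id
    display_name="Levels",
    default={"min": 0.0, "max": 1.0, "midpoint": 0.5},
    show_midpoint=True,
    value_min=0.0,
    value_max=1.0,
    tooltip="Black/white input points with midtone gamma.",
)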
DYNAMIC_INPUT_LOOKUP: dict[str, Callable[[dict[str, Any], dict[str, Any], tuple[str, dict[str, Any]], str, list[str] | None], None]] = {}
def register_dynamic_input_func(io_type: str, func: Callable[[dict[str, Any], dict[str, Any], tuple[str, dict[str, Any]], str, list[str] | None], None]):
DYNAMIC_INPUT_LOOKUP[io_type] = func
@ -2276,5 +2313,6 @@ __all__ = [
"BoundingBox",
"Curve",
"Histogram",
"Range",
"NodeReplace",
]

View File

@ -3,7 +3,7 @@ from dataclasses import dataclass
from enum import Enum
from fractions import Fraction
from typing import Optional
from .._input import ImageInput, AudioInput
from .._input import ImageInput, AudioInput, MaskInput
class VideoCodec(str, Enum):
AUTO = "auto"
@ -48,5 +48,4 @@ class VideoComponents:
frame_rate: Fraction
audio: Optional[AudioInput] = None
metadata: Optional[dict] = None
alpha: Optional[MaskInput] = None

View File

@ -118,7 +118,7 @@ class Wan27ReferenceVideoInputField(BaseModel):
class Wan27ReferenceVideoParametersField(BaseModel):
resolution: str = Field(...)
ratio: str | None = Field(None)
duration: int = Field(5, ge=2, le=10)
duration: int = Field(5, ge=2, le=15)
watermark: bool = Field(False)
seed: int = Field(..., ge=0, le=2147483647)
@ -157,7 +157,7 @@ class Wan27VideoEditInputField(BaseModel):
class Wan27VideoEditParametersField(BaseModel):
resolution: str = Field(...)
ratio: str | None = Field(None)
duration: int = Field(0)
duration: int | None = Field(0)
audio_setting: str = Field("auto")
watermark: bool = Field(False)
seed: int = Field(..., ge=0, le=2147483647)

View File

@ -276,6 +276,7 @@ async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusRe
cls,
ApiEndpoint(path=f"/proxy/kling/v1/videos/omni-video/{response.data.task_id}"),
response_model=TaskStatusResponse,
max_poll_attempts=280,
status_extractor=lambda r: (r.data.task_status if r.data else None),
)
return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url))
@ -862,7 +863,7 @@ class OmniProTextToVideoNode(IO.ComfyNode):
),
IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]),
IO.Int.Input("duration", default=5, min=3, max=15, display_mode=IO.NumberDisplay.slider),
IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
IO.Combo.Input("resolution", options=["4k", "1080p", "720p"], default="1080p", optional=True),
IO.DynamicCombo.Input(
"storyboards",
options=[
@ -904,12 +905,13 @@ class OmniProTextToVideoNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(widgets=["duration", "resolution", "model_name", "generate_audio"]),
expr="""
(
$mode := (widgets.resolution = "720p") ? "std" : "pro";
$res := widgets.resolution;
$mode := $res = "4k" ? "4k" : ($res = "720p" ? "std" : "pro");
$isV3 := $contains(widgets.model_name, "v3");
$audio := $isV3 and widgets.generate_audio;
$rates := $audio
? {"std": 0.112, "pro": 0.14}
: {"std": 0.084, "pro": 0.112};
? {"std": 0.112, "pro": 0.14, "4k": 0.42}
: {"std": 0.084, "pro": 0.112, "4k": 0.42};
{"type":"usd","usd": $lookup($rates, $mode) * widgets.duration}
)
""",
@ -934,6 +936,8 @@ class OmniProTextToVideoNode(IO.ComfyNode):
raise ValueError("kling-video-o1 only supports durations of 5 or 10 seconds.")
if generate_audio:
raise ValueError("kling-video-o1 does not support audio generation.")
if resolution == "4k":
raise ValueError("kling-video-o1 does not support 4k resolution.")
stories_enabled = storyboards is not None and storyboards["storyboards"] != "disabled"
if stories_enabled and model_name == "kling-video-o1":
raise ValueError("kling-video-o1 does not support storyboards.")
@ -963,6 +967,12 @@ class OmniProTextToVideoNode(IO.ComfyNode):
f"must equal the global duration ({duration}s)."
)
if resolution == "4k":
mode = "4k"
elif resolution == "1080p":
mode = "pro"
else:
mode = "std"
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
@ -972,7 +982,7 @@ class OmniProTextToVideoNode(IO.ComfyNode):
prompt=prompt,
aspect_ratio=aspect_ratio,
duration=str(duration),
mode="pro" if resolution == "1080p" else "std",
mode=mode,
multi_shot=multi_shot,
multi_prompt=multi_prompt_list,
shot_type="customize" if multi_shot else None,
@ -1014,7 +1024,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
optional=True,
tooltip="Up to 6 additional reference images.",
),
IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
IO.Combo.Input("resolution", options=["4k", "1080p", "720p"], default="1080p", optional=True),
IO.DynamicCombo.Input(
"storyboards",
options=[
@ -1061,12 +1071,13 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(widgets=["duration", "resolution", "model_name", "generate_audio"]),
expr="""
(
$mode := (widgets.resolution = "720p") ? "std" : "pro";
$res := widgets.resolution;
$mode := $res = "4k" ? "4k" : ($res = "720p" ? "std" : "pro");
$isV3 := $contains(widgets.model_name, "v3");
$audio := $isV3 and widgets.generate_audio;
$rates := $audio
? {"std": 0.112, "pro": 0.14}
: {"std": 0.084, "pro": 0.112};
? {"std": 0.112, "pro": 0.14, "4k": 0.42}
: {"std": 0.084, "pro": 0.112, "4k": 0.42};
{"type":"usd","usd": $lookup($rates, $mode) * widgets.duration}
)
""",
@ -1093,6 +1104,8 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
raise ValueError("kling-video-o1 does not support durations greater than 10 seconds.")
if generate_audio:
raise ValueError("kling-video-o1 does not support audio generation.")
if resolution == "4k":
raise ValueError("kling-video-o1 does not support 4k resolution.")
stories_enabled = storyboards is not None and storyboards["storyboards"] != "disabled"
if stories_enabled and model_name == "kling-video-o1":
raise ValueError("kling-video-o1 does not support storyboards.")
@ -1161,6 +1174,12 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
validate_image_aspect_ratio(i, (1, 2.5), (2.5, 1))
for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference frame(s)"):
image_list.append(OmniParamImage(image_url=i))
if resolution == "4k":
mode = "4k"
elif resolution == "1080p":
mode = "pro"
else:
mode = "std"
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
@ -1170,7 +1189,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
prompt=prompt,
duration=str(duration),
image_list=image_list,
mode="pro" if resolution == "1080p" else "std",
mode=mode,
sound="on" if generate_audio else "off",
multi_shot=multi_shot,
multi_prompt=multi_prompt_list,
@ -1204,7 +1223,7 @@ class OmniProImageToVideoNode(IO.ComfyNode):
"reference_images",
tooltip="Up to 7 reference images.",
),
IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
IO.Combo.Input("resolution", options=["4k", "1080p", "720p"], default="1080p", optional=True),
IO.DynamicCombo.Input(
"storyboards",
options=[
@ -1251,12 +1270,13 @@ class OmniProImageToVideoNode(IO.ComfyNode):
depends_on=IO.PriceBadgeDepends(widgets=["duration", "resolution", "model_name", "generate_audio"]),
expr="""
(
$mode := (widgets.resolution = "720p") ? "std" : "pro";
$res := widgets.resolution;
$mode := $res = "4k" ? "4k" : ($res = "720p" ? "std" : "pro");
$isV3 := $contains(widgets.model_name, "v3");
$audio := $isV3 and widgets.generate_audio;
$rates := $audio
? {"std": 0.112, "pro": 0.14}
: {"std": 0.084, "pro": 0.112};
? {"std": 0.112, "pro": 0.14, "4k": 0.42}
: {"std": 0.084, "pro": 0.112, "4k": 0.42};
{"type":"usd","usd": $lookup($rates, $mode) * widgets.duration}
)
""",
@ -1282,6 +1302,8 @@ class OmniProImageToVideoNode(IO.ComfyNode):
raise ValueError("kling-video-o1 does not support durations greater than 10 seconds.")
if generate_audio:
raise ValueError("kling-video-o1 does not support audio generation.")
if resolution == "4k":
raise ValueError("kling-video-o1 does not support 4k resolution.")
stories_enabled = storyboards is not None and storyboards["storyboards"] != "disabled"
if stories_enabled and model_name == "kling-video-o1":
raise ValueError("kling-video-o1 does not support storyboards.")
@ -1320,6 +1342,12 @@ class OmniProImageToVideoNode(IO.ComfyNode):
image_list: list[OmniParamImage] = []
for i in await upload_images_to_comfyapi(cls, reference_images, wait_label="Uploading reference image"):
image_list.append(OmniParamImage(image_url=i))
if resolution == "4k":
mode = "4k"
elif resolution == "1080p":
mode = "pro"
else:
mode = "std"
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/kling/v1/videos/omni-video", method="POST"),
@ -1330,7 +1358,7 @@ class OmniProImageToVideoNode(IO.ComfyNode):
aspect_ratio=aspect_ratio,
duration=str(duration),
image_list=image_list,
mode="pro" if resolution == "1080p" else "std",
mode=mode,
sound="on" if generate_audio else "off",
multi_shot=multi_shot,
multi_prompt=multi_prompt_list,
@ -2860,7 +2888,7 @@ class KlingVideoNode(IO.ComfyNode):
IO.DynamicCombo.Option(
"kling-v3",
[
IO.Combo.Input("resolution", options=["1080p", "720p"]),
IO.Combo.Input("resolution", options=["4k", "1080p", "720p"], default="1080p"),
IO.Combo.Input(
"aspect_ratio",
options=["16:9", "9:16", "1:1"],
@ -2913,7 +2941,11 @@ class KlingVideoNode(IO.ComfyNode):
),
expr="""
(
$rates := {"1080p": {"off": 0.112, "on": 0.168}, "720p": {"off": 0.084, "on": 0.126}};
$rates := {
"4k": {"off": 0.42, "on": 0.42},
"1080p": {"off": 0.112, "on": 0.168},
"720p": {"off": 0.084, "on": 0.126}
};
$res := $lookup(widgets, "model.resolution");
$audio := widgets.generate_audio ? "on" : "off";
$rate := $lookup($lookup($rates, $res), $audio);
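
For reference, the v3 rate table above expressed as Python (per-second USD rates from this diff):

V3_RATES = {
    "4k":    {"off": 0.42,  "on": 0.42},
    "1080p": {"off": 0.112, "on": 0.168},
    "720p":  {"off": 0.084, "on": 0.126},
}
price = V3_RATES["1080p"]["on"] * 5  # 5 s with audio -> 0.84 USD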
@ -2943,7 +2975,12 @@ class KlingVideoNode(IO.ComfyNode):
start_frame: Input.Image | None = None,
) -> IO.NodeOutput:
_ = seed
mode = "pro" if model["resolution"] == "1080p" else "std"
if model["resolution"] == "4k":
mode = "4k"
elif model["resolution"] == "1080p":
mode = "pro"
else:
mode = "std"
custom_multi_shot = False
if multi_shot["multi_shot"] == "disabled":
shot_type = None
@ -3025,6 +3062,7 @@ class KlingVideoNode(IO.ComfyNode):
cls,
ApiEndpoint(path=poll_path),
response_model=TaskStatusResponse,
max_poll_attempts=280,
status_extractor=lambda r: (r.data.task_status if r.data else None),
)
return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url))
@ -3057,7 +3095,7 @@ class KlingFirstLastFrameNode(IO.ComfyNode):
IO.DynamicCombo.Option(
"kling-v3",
[
IO.Combo.Input("resolution", options=["1080p", "720p"]),
IO.Combo.Input("resolution", options=["4k", "1080p", "720p"], default="1080p"),
],
),
],
@ -3089,7 +3127,11 @@ class KlingFirstLastFrameNode(IO.ComfyNode):
),
expr="""
(
$rates := {"1080p": {"off": 0.112, "on": 0.168}, "720p": {"off": 0.084, "on": 0.126}};
$rates := {
"4k": {"off": 0.42, "on": 0.42},
"1080p": {"off": 0.112, "on": 0.168},
"720p": {"off": 0.084, "on": 0.126}
};
$res := $lookup(widgets, "model.resolution");
$audio := widgets.generate_audio ? "on" : "off";
$rate := $lookup($lookup($rates, $res), $audio);
@ -3118,6 +3160,12 @@ class KlingFirstLastFrameNode(IO.ComfyNode):
validate_image_aspect_ratio(end_frame, (1, 2.5), (2.5, 1))
image_url = await upload_image_to_comfyapi(cls, first_frame, wait_label="Uploading first frame")
image_tail_url = await upload_image_to_comfyapi(cls, end_frame, wait_label="Uploading end frame")
if model["resolution"] == "4k":
mode = "4k"
elif model["resolution"] == "1080p":
mode = "pro"
else:
mode = "std"
response = await sync_op(
cls,
ApiEndpoint(path="/proxy/kling/v1/videos/image2video", method="POST"),
@ -3127,7 +3175,7 @@ class KlingFirstLastFrameNode(IO.ComfyNode):
image=image_url,
image_tail=image_tail_url,
prompt=prompt,
mode="pro" if model["resolution"] == "1080p" else "std",
mode=mode,
duration=str(duration),
sound="on" if generate_audio else "off",
),
@ -3140,6 +3188,7 @@ class KlingFirstLastFrameNode(IO.ComfyNode):
cls,
ApiEndpoint(path=f"/proxy/kling/v1/videos/image2video/{response.data.task_id}"),
response_model=TaskStatusResponse,
max_poll_attempts=280,
status_extractor=lambda r: (r.data.task_status if r.data else None),
)
return IO.NodeOutput(await download_url_to_video_output(final_response.data.task_result.videos[0].url))

View File

@ -357,6 +357,10 @@ def calculate_tokens_price_image_1_5(response: OpenAIImageGenerationResponse) ->
return ((response.usage.input_tokens * 8.0) + (response.usage.output_tokens * 32.0)) / 1_000_000.0
def calculate_tokens_price_image_2_0(response: OpenAIImageGenerationResponse) -> float | None:
return ((response.usage.input_tokens * 8.0) + (response.usage.output_tokens * 30.0)) / 1_000_000.0
class OpenAIGPTImage1(IO.ComfyNode):
@classmethod
@ -401,7 +405,17 @@ class OpenAIGPTImage1(IO.ComfyNode):
IO.Combo.Input(
"size",
default="auto",
options=["auto", "1024x1024", "1024x1536", "1536x1024"],
options=[
"auto",
"1024x1024",
"1024x1536",
"1536x1024",
"2048x2048",
"2048x1152",
"1152x2048",
"3840x2160",
"2160x3840",
],
tooltip="Image size",
optional=True,
),
@ -427,7 +441,7 @@ class OpenAIGPTImage1(IO.ComfyNode):
),
IO.Combo.Input(
"model",
options=["gpt-image-1", "gpt-image-1.5", 'gpt-image-2'],
options=["gpt-image-1", "gpt-image-1.5", "gpt-image-2"],
default="gpt-image-2",
optional=True,
),
@ -442,23 +456,36 @@ class OpenAIGPTImage1(IO.ComfyNode):
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["quality", "n"]),
depends_on=IO.PriceBadgeDepends(widgets=["quality", "n", "model"]),
expr="""
(
$ranges := {
"low": [0.011, 0.02],
"medium": [0.046, 0.07],
"high": [0.167, 0.3]
"gpt-image-1": {
"low": [0.011, 0.02],
"medium": [0.042, 0.07],
"high": [0.167, 0.25]
},
"gpt-image-1.5": {
"low": [0.009, 0.02],
"medium": [0.034, 0.062],
"high": [0.133, 0.22]
},
"gpt-image-2": {
"low": [0.0048, 0.012],
"medium": [0.041, 0.112],
"high": [0.165, 0.43]
}
};
$range := $lookup($ranges, widgets.quality);
$n := widgets.n;
$range := $lookup($lookup($ranges, widgets.model), widgets.quality);
$nRaw := widgets.n;
$n := ($nRaw != null and $nRaw != 0) ? $nRaw : 1;
($n = 1)
? {"type":"range_usd","min_usd": $range[0], "max_usd": $range[1]}
? {"type":"range_usd","min_usd": $range[0], "max_usd": $range[1], "format": {"approximate": true}}
: {
"type":"range_usd",
"min_usd": $range[0],
"max_usd": $range[1],
"format": { "suffix": " x " & $string($n) & "/Run" }
"min_usd": $range[0] * $n,
"max_usd": $range[1] * $n,
"format": { "suffix": "/Run", "approximate": true }
}
)
""",
@ -483,12 +510,18 @@ class OpenAIGPTImage1(IO.ComfyNode):
if mask is not None and image is None:
raise ValueError("Cannot use a mask without an input image")
if model in ("gpt-image-1", "gpt-image-1.5"):
if size not in ("auto", "1024x1024", "1024x1536", "1536x1024"):
raise ValueError(f"Resolution {size} is only supported by GPT Image 2 model")
if model == "gpt-image-1":
price_extractor = calculate_tokens_price_image_1
elif model == "gpt-image-1.5":
price_extractor = calculate_tokens_price_image_1_5
elif model == "gpt-image-2":
price_extractor = calculate_tokens_price_image_1_5
price_extractor = calculate_tokens_price_image_2_0
if background == "transparent":
raise ValueError("Transparent background is not supported for GPT Image 2 model")
else:
raise ValueError(f"Unknown model: {model}")

View File

@ -33,9 +33,13 @@ class OpenAIVideoSora2(IO.ComfyNode):
def define_schema(cls):
return IO.Schema(
node_id="OpenAIVideoSora2",
display_name="OpenAI Sora - Video",
display_name="OpenAI Sora - Video (Deprecated)",
category="api node/video/Sora",
description="OpenAI video and audio generation.",
description=(
"OpenAI video and audio generation.\n\n"
"DEPRECATION NOTICE: OpenAI will stop serving the Sora v2 API in September 2026. "
"This node will be removed from ComfyUI at that time."
),
inputs=[
IO.Combo.Input(
"model",

View File

@ -1646,6 +1646,557 @@ class Wan2ReferenceVideoApi(IO.ComfyNode):
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseTextToVideoApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseTextToVideoApi",
display_name="HappyHorse Text to Video",
category="api node/video/Wan",
description="Generates a video based on a text prompt using the HappyHorse model.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-t2v",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt describing the elements and visual features. "
"Supports English and Chinese.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Combo.Input(
"ratio",
options=["16:9", "9:16", "1:1", "4:3", "3:4"],
),
IO.Int.Input(
"duration",
default=5,
min=3,
max=15,
step=1,
display_mode=IO.NumberDisplay.number,
),
],
),
],
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$dur := $lookup(widgets, "model.duration");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps * $dur }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
seed: int,
watermark: bool,
):
validate_string(model["prompt"], strip_whitespace=False, min_length=1)
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27Text2VideoTaskCreationRequest(
model=model["model"],
input=Text2VideoInputField(
prompt=model["prompt"],
negative_prompt=None,
),
parameters=Wan27Text2VideoParametersField(
resolution=model["resolution"],
ratio=model["ratio"],
duration=model["duration"],
seed=seed,
watermark=watermark,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
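With the per-second rates in the price badge above, a 1080P clip at the default 5-second duration displays as 0.24 × 5 = $1.20 per run, and a 15-second 720P clip as 0.14 × 15 = $2.10; the image-to-video, video-edit, and reference-to-video nodes below reuse the same rate table.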
class HappyHorseImageToVideoApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseImageToVideoApi",
display_name="HappyHorse Image to Video",
category="api node/video/Wan",
description="Generate a video from a first-frame image using the HappyHorse model.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-i2v",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt describing the elements and visual features. "
"Supports English and Chinese.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Int.Input(
"duration",
default=5,
min=3,
max=15,
step=1,
display_mode=IO.NumberDisplay.number,
),
],
),
],
),
IO.Image.Input(
"first_frame",
tooltip="First frame image. The output aspect ratio is derived from this image.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$dur := $lookup(widgets, "model.duration");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps * $dur }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
first_frame: Input.Image,
seed: int,
watermark: bool,
):
media = [
Wan27MediaItem(
type="first_frame",
url=await upload_image_to_comfyapi(cls, image=first_frame),
)
]
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27ImageToVideoTaskCreationRequest(
model=model["model"],
input=Wan27ImageToVideoInputField(
prompt=model["prompt"] or None,
negative_prompt=None,
media=media,
),
parameters=Wan27ImageToVideoParametersField(
resolution=model["resolution"],
duration=model["duration"],
seed=seed,
watermark=watermark,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseVideoEditApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseVideoEditApi",
display_name="HappyHorse Video Edit",
category="api node/video/Wan",
description="Edit a video using text instructions or reference images with the HappyHorse model. "
"Output duration is 3-15s and matches the input video; inputs longer than 15s are truncated.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-video-edit",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Editing instructions or style transfer requirements.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Combo.Input(
"ratio",
options=["16:9", "9:16", "1:1", "4:3", "3:4"],
tooltip="Aspect ratio. If not changed, approximates the input video ratio.",
),
IO.Autogrow.Input(
"reference_images",
template=IO.Autogrow.TemplateNames(
IO.Image.Input("reference_image"),
names=[
"image1",
"image2",
"image3",
"image4",
"image5",
],
min=0,
),
),
],
),
],
),
IO.Video.Input(
"video",
tooltip="The video to edit.",
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps, "format": { "suffix": "/second" } }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
video: Input.Video,
seed: int,
watermark: bool,
):
validate_string(model["prompt"], strip_whitespace=False, min_length=1)
validate_video_duration(video, min_duration=3, max_duration=60)
media = [Wan27MediaItem(type="video", url=await upload_video_to_comfyapi(cls, video))]
reference_images = model.get("reference_images", {})
for key in reference_images:
media.append(
Wan27MediaItem(
type="reference_image", url=await upload_image_to_comfyapi(cls, image=reference_images[key])
)
)
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27VideoEditTaskCreationRequest(
model=model["model"],
input=Wan27VideoEditInputField(prompt=model["prompt"], media=media),
parameters=Wan27VideoEditParametersField(
resolution=model["resolution"],
ratio=model["ratio"],
duration=None,
watermark=watermark,
seed=seed,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class HappyHorseReferenceVideoApi(IO.ComfyNode):
@classmethod
def define_schema(cls):
return IO.Schema(
node_id="HappyHorseReferenceVideoApi",
display_name="HappyHorse Reference to Video",
category="api node/video/Wan",
description="Generate a video featuring a person or object from reference materials with the HappyHorse "
"model. Supports single-character performances and multi-character interactions.",
inputs=[
IO.DynamicCombo.Input(
"model",
options=[
IO.DynamicCombo.Option(
"happyhorse-1.0-r2v",
[
IO.String.Input(
"prompt",
multiline=True,
default="",
tooltip="Prompt describing the video. Use identifiers such as 'character1' and "
"'character2' to refer to the reference characters.",
),
IO.Combo.Input(
"resolution",
options=["720P", "1080P"],
),
IO.Combo.Input(
"ratio",
options=["16:9", "9:16", "1:1", "4:3", "3:4"],
),
IO.Int.Input(
"duration",
default=5,
min=3,
max=15,
step=1,
display_mode=IO.NumberDisplay.number,
),
IO.Autogrow.Input(
"reference_images",
template=IO.Autogrow.TemplateNames(
IO.Image.Input("reference_image"),
names=[
"image1",
"image2",
"image3",
"image4",
"image5",
"image6",
"image7",
"image8",
"image9",
],
min=1,
),
),
],
),
],
),
IO.Int.Input(
"seed",
default=0,
min=0,
max=2147483647,
step=1,
display_mode=IO.NumberDisplay.number,
control_after_generate=True,
tooltip="Seed to use for generation.",
),
IO.Boolean.Input(
"watermark",
default=False,
tooltip="Whether to add an AI-generated watermark to the result.",
advanced=True,
),
],
outputs=[
IO.Video.Output(),
],
hidden=[
IO.Hidden.auth_token_comfy_org,
IO.Hidden.api_key_comfy_org,
IO.Hidden.unique_id,
],
is_api_node=True,
price_badge=IO.PriceBadge(
depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution", "model.duration"]),
expr="""
(
$res := $lookup(widgets, "model.resolution");
$dur := $lookup(widgets, "model.duration");
$ppsTable := { "720p": 0.14, "1080p": 0.24 };
$pps := $lookup($ppsTable, $res);
{ "type": "usd", "usd": $pps * $dur }
)
""",
),
)
@classmethod
async def execute(
cls,
model: dict,
seed: int,
watermark: bool,
):
validate_string(model["prompt"], strip_whitespace=False, min_length=1)
media = []
reference_images = model.get("reference_images", {})
for key in reference_images:
media.append(
Wan27MediaItem(
type="reference_image",
url=await upload_image_to_comfyapi(cls, image=reference_images[key]),
)
)
if not media:
raise ValueError("At least one reference reference image must be provided.")
initial_response = await sync_op(
cls,
ApiEndpoint(
path="/proxy/wan/api/v1/services/aigc/video-generation/video-synthesis",
method="POST",
),
response_model=TaskCreationResponse,
data=Wan27ReferenceVideoTaskCreationRequest(
model=model["model"],
input=Wan27ReferenceVideoInputField(
prompt=model["prompt"],
negative_prompt=None,
media=media,
),
parameters=Wan27ReferenceVideoParametersField(
resolution=model["resolution"],
ratio=model["ratio"],
duration=model["duration"],
watermark=watermark,
seed=seed,
),
),
)
if not initial_response.output:
raise Exception(f"An unknown error occurred: {initial_response.code} - {initial_response.message}")
response = await poll_op(
cls,
ApiEndpoint(path=f"/proxy/wan/api/v1/tasks/{initial_response.output.task_id}"),
response_model=VideoTaskStatusResponse,
status_extractor=lambda x: x.output.task_status,
poll_interval=7,
)
return IO.NodeOutput(await download_url_to_video_output(response.output.video_url))
class WanApiExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
@ -1660,6 +2211,10 @@ class WanApiExtension(ComfyExtension):
Wan2VideoContinuationApi,
Wan2VideoEditApi,
Wan2ReferenceVideoApi,
HappyHorseTextToVideoApi,
HappyHorseImageToVideoApi,
HappyHorseVideoEditApi,
HappyHorseReferenceVideoApi,
]

View File

@ -1,6 +1,7 @@
import nodes
import node_helpers
import torch
import torchaudio
import comfy.model_management
import comfy.model_sampling
import comfy.samplers
@ -711,7 +712,14 @@ class LTXVReferenceAudio(io.ComfyNode):
@classmethod
def execute(cls, model, positive, negative, reference_audio, audio_vae, identity_guidance_scale, start_percent, end_percent) -> io.NodeOutput:
# Encode reference audio to latents and patchify
audio_latents = audio_vae.encode(reference_audio)
sample_rate = reference_audio["sample_rate"]
vae_sample_rate = getattr(audio_vae, "audio_sample_rate", 44100)
if vae_sample_rate != sample_rate:
waveform = torchaudio.functional.resample(reference_audio["waveform"], sample_rate, vae_sample_rate)
else:
waveform = reference_audio["waveform"]
audio_latents = audio_vae.encode(waveform.movedim(1, -1))
b, c, t, f = audio_latents.shape
ref_tokens = audio_latents.permute(0, 2, 1, 3).reshape(b, t, c * f)
ref_audio = {"tokens": ref_tokens}

View File

@ -1,5 +1,6 @@
import json
from comfy.comfy_types.node_typing import IO
import torch
# Preview Any - original implement from
# https://github.com/rgthree/rgthree-comfy/blob/main/py/display_any.py
@ -19,6 +20,7 @@ class PreviewAny():
SEARCH_ALIASES = ["show output", "inspect", "debug", "print value", "show text"]
def main(self, source=None):
torch.set_printoptions(edgeitems=6)
value = 'None'
if isinstance(source, str):
value = source
@ -33,6 +35,7 @@ class PreviewAny():
except Exception:
value = 'source exists, but could not be serialized.'
torch.set_printoptions(profile="default")
return {"ui": {"text": (value,)}, "result": (value,)}
NODE_CLASS_MAPPINGS = {

View File

@ -54,7 +54,7 @@ class EmptySD3LatentImage(io.ComfyNode):
@classmethod
def execute(cls, width, height, batch_size=1) -> io.NodeOutput:
latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=comfy.model_management.intermediate_device())
latent = torch.zeros([batch_size, 16, height // 8, width // 8], device=comfy.model_management.intermediate_device(), dtype=comfy.model_management.intermediate_dtype())
return io.NodeOutput({"samples": latent, "downscale_ratio_spacial": 8})
generate = execute # TODO: remove

View File

@ -1,3 +1,3 @@
# This file is automatically generated by the build process when version is
# updated in pyproject.toml.
__version__ = "0.19.3"
__version__ = "0.20.0"

View File

@ -811,11 +811,30 @@ class PromptExecutor:
self._notify_prompt_lifecycle("end", prompt_id)
async def validate_inputs(prompt_id, prompt, item, validated):
async def validate_inputs(prompt_id, prompt, item, validated, visiting=None):
if visiting is None:
visiting = []
unique_id = item
if unique_id in validated:
return validated[unique_id]
if unique_id in visiting:
cycle_path_nodes = visiting[visiting.index(unique_id):] + [unique_id]
cycle_nodes = list(dict.fromkeys(cycle_path_nodes))
cycle_path = " -> ".join(f"{node_id} ({prompt[node_id]['class_type']})" for node_id in cycle_path_nodes)
for node_id in cycle_nodes:
validated[node_id] = (False, [{
"type": "dependency_cycle",
"message": "Dependency cycle detected",
"details": cycle_path,
"extra_info": {
"node_id": node_id,
"cycle_nodes": cycle_nodes,
}
}], node_id)
return validated[unique_id]
inputs = prompt[unique_id]['inputs']
class_type = prompt[unique_id]['class_type']
obj_class = nodes.NODE_CLASS_MAPPINGS[class_type]
@ -899,7 +918,11 @@ async def validate_inputs(prompt_id, prompt, item, validated):
errors.append(error)
continue
try:
r = await validate_inputs(prompt_id, prompt, o_id, validated)
visiting.append(unique_id)
try:
r = await validate_inputs(prompt_id, prompt, o_id, validated, visiting)
finally:
visiting.pop()
if r[0] is False:
# `r` will be set in `validated[o_id]` already
valid = False
@ -1048,10 +1071,13 @@ async def validate_inputs(prompt_id, prompt, item, validated):
errors.append(error)
continue
if len(errors) > 0 or valid is not True:
ret = (False, errors, unique_id)
else:
ret = (True, [], unique_id)
ret = validated.get(unique_id, (True, [], unique_id))
# Recursive cycle detection may have already populated an error on us. Join it.
ret = (
ret[0] and valid is True and not errors,
ret[1] + [error for error in errors if error not in ret[1]],
unique_id,
)
validated[unique_id] = ret
return ret
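A minimal, self-contained sketch of the visiting-stack technique the change above uses (the graph shape and the find_cycle helper are hypothetical, not part of the executor):
def find_cycle(graph: dict[str, list[str]], node: str, visiting: list[str]) -> list[str] | None:
    # graph maps a node id to the ids of the nodes it depends on
    if node in visiting:
        return visiting[visiting.index(node):] + [node]  # the closed cycle path
    visiting.append(node)
    try:
        for dep in graph.get(node, []):
            cycle = find_cycle(graph, dep, visiting)
            if cycle is not None:
                return cycle
    finally:
        visiting.pop()  # unwind even on early return, as the try/finally above does
    return None
print(find_cycle({"a": ["b"], "b": ["c"], "c": ["a"]}, "a", []))  # ['a', 'b', 'c', 'a']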

3231
openapi.yaml Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
[project]
name = "ComfyUI"
version = "0.19.3"
version = "0.20.0"
readme = "README.md"
license = { file = "LICENSE" }
requires-python = ">=3.10"

View File

@ -1,6 +1,6 @@
comfyui-frontend-package==1.42.14
comfyui-workflow-templates==0.9.59
comfyui-embedded-docs==0.4.3
comfyui-frontend-package==1.42.15
comfyui-workflow-templates==0.9.63
comfyui-embedded-docs==0.4.4
torch
torchsde
torchvision
@ -23,7 +23,7 @@ SQLAlchemy>=2.0
filelock
av>=14.2.0
comfy-kitchen>=0.2.8
comfy-aimdo>=0.2.12
comfy-aimdo==0.2.14
requests
simpleeval>=1.0.0
blake3