diff --git a/blueprints/Brightness and Contrast.json b/blueprints/Brightness and Contrast.json index 90bfe999d..78fc52f29 100644 --- a/blueprints/Brightness and Contrast.json +++ b/blueprints/Brightness and Contrast.json @@ -431,9 +431,10 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Adjusts image brightness and contrast using a real-time GPU fragment shader." } ] }, "extra": {} -} +} \ No newline at end of file diff --git a/blueprints/Canny to Image (Z-Image-Turbo).json b/blueprints/Canny to Image (Z-Image-Turbo).json index ff9717308..ee36fc916 100644 --- a/blueprints/Canny to Image (Z-Image-Turbo).json +++ b/blueprints/Canny to Image (Z-Image-Turbo).json @@ -1553,7 +1553,8 @@ "VHS_MetadataImage": true, "VHS_KeepIntermediate": true }, - "category": "Image generation and editing/Canny to image" + "category": "Image generation and editing/Canny to image", + "description": "Generates an image from a Canny edge map using Z-Image-Turbo, with text conditioning and fast inference." } ] }, @@ -1574,4 +1575,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Canny to Video (LTX 2.0).json b/blueprints/Canny to Video (LTX 2.0).json index fae8321b9..0eab82f49 100644 --- a/blueprints/Canny to Video (LTX 2.0).json +++ b/blueprints/Canny to Video (LTX 2.0).json @@ -3600,7 +3600,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video generation and editing/Canny to video" + "category": "Video generation and editing/Canny to video", + "description": "Generates video from Canny edge maps using LTX-2, with optional synchronized audio." } ] }, @@ -3616,4 +3617,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Chromatic Aberration.json b/blueprints/Chromatic Aberration.json index ae8037b1b..60be1e921 100644 --- a/blueprints/Chromatic Aberration.json +++ b/blueprints/Chromatic Aberration.json @@ -232,7 +232,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform int u_int0; // Mode\nuniform float u_float0; // Amount (0 to 100)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MODE_LINEAR = 0;\nconst int MODE_RADIAL = 1;\nconst int MODE_BARREL = 2;\nconst int MODE_SWIRL = 3;\nconst int MODE_DIAGONAL = 4;\n\nconst float AMOUNT_SCALE = 0.0005;\nconst float RADIAL_MULT = 4.0;\nconst float BARREL_MULT = 8.0;\nconst float INV_SQRT2 = 0.70710678118;\n\nvoid main() {\n vec2 uv = v_texCoord;\n vec4 original = texture(u_image0, uv);\n\n float amount = u_float0 * AMOUNT_SCALE;\n\n if (amount < 0.000001) {\n fragColor = original;\n return;\n }\n\n // Aspect-corrected coordinates for circular effects\n float aspect = u_resolution.x / u_resolution.y;\n vec2 centered = uv - 0.5;\n vec2 corrected = vec2(centered.x * aspect, centered.y);\n float r = length(corrected);\n vec2 dir = r > 0.0001 ? 
corrected / r : vec2(0.0);\n vec2 offset = vec2(0.0);\n\n if (u_int0 == MODE_LINEAR) {\n // Horizontal shift (no aspect correction needed)\n offset = vec2(amount, 0.0);\n }\n else if (u_int0 == MODE_RADIAL) {\n // Outward from center, stronger at edges\n offset = dir * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_BARREL) {\n // Lens distortion simulation (r² falloff)\n offset = dir * r * r * amount * BARREL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_SWIRL) {\n // Perpendicular to radial (rotational aberration)\n vec2 perp = vec2(-dir.y, dir.x);\n offset = perp * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_DIAGONAL) {\n // 45° offset (no aspect correction needed)\n offset = vec2(amount, amount) * INV_SQRT2;\n }\n \n float red = texture(u_image0, uv + offset).r;\n float green = original.g;\n float blue = texture(u_image0, uv - offset).b;\n \n fragColor = vec4(red, green, blue, original.a);\n}", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform int u_int0; // Mode\nuniform float u_float0; // Amount (0 to 100)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MODE_LINEAR = 0;\nconst int MODE_RADIAL = 1;\nconst int MODE_BARREL = 2;\nconst int MODE_SWIRL = 3;\nconst int MODE_DIAGONAL = 4;\n\nconst float AMOUNT_SCALE = 0.0005;\nconst float RADIAL_MULT = 4.0;\nconst float BARREL_MULT = 8.0;\nconst float INV_SQRT2 = 0.70710678118;\n\nvoid main() {\n vec2 uv = v_texCoord;\n vec4 original = texture(u_image0, uv);\n\n float amount = u_float0 * AMOUNT_SCALE;\n\n if (amount < 0.000001) {\n fragColor = original;\n return;\n }\n\n // Aspect-corrected coordinates for circular effects\n float aspect = u_resolution.x / u_resolution.y;\n vec2 centered = uv - 0.5;\n vec2 corrected = vec2(centered.x * aspect, centered.y);\n float r = length(corrected);\n vec2 dir = r > 0.0001 ? corrected / r : vec2(0.0);\n vec2 offset = vec2(0.0);\n\n if (u_int0 == MODE_LINEAR) {\n // Horizontal shift (no aspect correction needed)\n offset = vec2(amount, 0.0);\n }\n else if (u_int0 == MODE_RADIAL) {\n // Outward from center, stronger at edges\n offset = dir * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_BARREL) {\n // Lens distortion simulation (r\u00b2 falloff)\n offset = dir * r * r * amount * BARREL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_SWIRL) {\n // Perpendicular to radial (rotational aberration)\n vec2 perp = vec2(-dir.y, dir.x);\n offset = perp * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_DIAGONAL) {\n // 45\u00b0 offset (no aspect correction needed)\n offset = vec2(amount, amount) * INV_SQRT2;\n }\n \n float red = texture(u_image0, uv + offset).r;\n float green = original.g;\n float blue = texture(u_image0, uv - offset).b;\n \n fragColor = vec4(red, green, blue, original.a);\n}", "from_input" ] }, @@ -377,8 +377,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Adds lens-style chromatic aberration (color fringing) using a real-time GPU fragment shader." 
} ] } -} +} \ No newline at end of file diff --git a/blueprints/Color Adjustment.json b/blueprints/Color Adjustment.json index 622bf28af..44f5e1a92 100644 --- a/blueprints/Color Adjustment.json +++ b/blueprints/Color Adjustment.json @@ -596,8 +596,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Adjusts saturation, temperature, and tint using a real-time GPU fragment shader." } ] } -} +} \ No newline at end of file diff --git a/blueprints/Color Balance.json b/blueprints/Color Balance.json index 21d6319ed..d921eab37 100644 --- a/blueprints/Color Balance.json +++ b/blueprints/Color Balance.json @@ -1129,7 +1129,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Balances colors across shadows, midtones, and highlights using a real-time GPU fragment shader." } ] } diff --git a/blueprints/Color Curves.json b/blueprints/Color Curves.json index 1461cf396..b9bfb7029 100644 --- a/blueprints/Color Curves.json +++ b/blueprints/Color Curves.json @@ -608,7 +608,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Fine-tunes tone and color with per-channel curve adjustments using a real-time GPU fragment shader." } ] } diff --git a/blueprints/Crop Images 2x2.json b/blueprints/Crop Images 2x2.json index 2aa42cfc3..261de1856 100644 --- a/blueprints/Crop Images 2x2.json +++ b/blueprints/Crop Images 2x2.json @@ -1609,7 +1609,8 @@ } ], "extra": {}, - "category": "Image Tools/Crop" + "category": "Image Tools/Crop", + "description": "Splits an image into a 2\u00d72 grid of four equal tiles." } ] }, diff --git a/blueprints/Crop Images 3x3.json b/blueprints/Crop Images 3x3.json index 3a3615ac8..74176dc3e 100644 --- a/blueprints/Crop Images 3x3.json +++ b/blueprints/Crop Images 3x3.json @@ -2946,7 +2946,8 @@ } ], "extra": {}, - "category": "Image Tools/Crop" + "category": "Image Tools/Crop", + "description": "Splits an image into a 3\u00d73 grid of nine equal tiles." } ] }, diff --git a/blueprints/Depth to Image (Z-Image-Turbo).json b/blueprints/Depth to Image (Z-Image-Turbo).json index 4f69a8149..fe9ef0f72 100644 --- a/blueprints/Depth to Image (Z-Image-Turbo).json +++ b/blueprints/Depth to Image (Z-Image-Turbo).json @@ -1579,7 +1579,8 @@ "VHS_MetadataImage": true, "VHS_KeepIntermediate": true }, - "category": "Image generation and editing/Depth to image" + "category": "Image generation and editing/Depth to image", + "description": "Generates an image from a depth map using Z-Image-Turbo with text conditioning." }, { "id": "458bdf3c-4b58-421c-af50-c9c663a4d74c", @@ -2461,7 +2462,8 @@ ] }, "workflowRendererVersion": "LG" - } + }, + "description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model."
} ] }, diff --git a/blueprints/Depth to Video (ltx 2.0).json b/blueprints/Depth to Video (ltx 2.0).json index f15212520..bb28695a2 100644 --- a/blueprints/Depth to Video (ltx 2.0).json +++ b/blueprints/Depth to Video (ltx 2.0).json @@ -4233,7 +4233,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video generation and editing/Depth to video" + "category": "Video generation and editing/Depth to video", + "description": "Generates video from depth maps using LTX-2, with optional synchronized audio." }, { "id": "38b60539-50a7-42f9-a5fe-bdeca26272e2", @@ -5192,7 +5193,8 @@ ], "extra": { "workflowRendererVersion": "LG" - } + }, + "description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model." } ] }, diff --git a/blueprints/Edge-Preserving Blur.json b/blueprints/Edge-Preserving Blur.json index 18012beb1..d3a61e3f2 100644 --- a/blueprints/Edge-Preserving Blur.json +++ b/blueprints/Edge-Preserving Blur.json @@ -252,7 +252,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // Blur radius (0–20, default ~5)\nuniform float u_float1; // Edge threshold (0–100, default ~30)\nuniform int u_int0; // Step size (0/1 = every pixel, 2+ = skip pixels)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MAX_RADIUS = 20;\nconst float EPSILON = 0.0001;\n\n// Perceptual luminance\nfloat getLuminance(vec3 rgb) {\n return dot(rgb, vec3(0.299, 0.587, 0.114));\n}\n\nvec4 bilateralFilter(vec2 uv, vec2 texelSize, int radius,\n float sigmaSpatial, float sigmaColor)\n{\n vec4 center = texture(u_image0, uv);\n vec3 centerRGB = center.rgb;\n\n float invSpatial2 = -0.5 / (sigmaSpatial * sigmaSpatial);\n float invColor2 = -0.5 / (sigmaColor * sigmaColor + EPSILON);\n\n vec3 sumRGB = vec3(0.0);\n float sumWeight = 0.0;\n\n int step = max(u_int0, 1);\n float radius2 = float(radius * radius);\n\n for (int dy = -MAX_RADIUS; dy <= MAX_RADIUS; dy++) {\n if (dy < -radius || dy > radius) continue;\n if (abs(dy) % step != 0) continue;\n\n for (int dx = -MAX_RADIUS; dx <= MAX_RADIUS; dx++) {\n if (dx < -radius || dx > radius) continue;\n if (abs(dx) % step != 0) continue;\n\n vec2 offset = vec2(float(dx), float(dy));\n float dist2 = dot(offset, offset);\n if (dist2 > radius2) continue;\n\n vec3 sampleRGB = texture(u_image0, uv + offset * texelSize).rgb;\n\n // Spatial Gaussian\n float spatialWeight = exp(dist2 * invSpatial2);\n\n // Perceptual color distance (weighted RGB)\n vec3 diff = sampleRGB - centerRGB;\n float colorDist = dot(diff * diff, vec3(0.299, 0.587, 0.114));\n float colorWeight = exp(colorDist * invColor2);\n\n float w = spatialWeight * colorWeight;\n sumRGB += sampleRGB * w;\n sumWeight += w;\n }\n }\n\n vec3 resultRGB = sumRGB / max(sumWeight, EPSILON);\n return vec4(resultRGB, center.a); // preserve center alpha\n}\n\nvoid main() {\n vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));\n\n float radiusF = clamp(u_float0, 0.0, float(MAX_RADIUS));\n int radius = int(radiusF + 0.5);\n\n if (radius == 0) {\n fragColor = texture(u_image0, v_texCoord);\n return;\n }\n\n // Edge threshold → color sigma\n // Squared curve for better low-end control\n float t = clamp(u_float1, 0.0, 100.0) / 100.0;\n t *= t;\n float sigmaColor = mix(0.01, 0.5, t);\n\n // Spatial sigma tied to radius\n float sigmaSpatial = max(radiusF * 0.75, 0.5);\n\n fragColor = bilateralFilter(\n v_texCoord,\n texelSize,\n radius,\n sigmaSpatial,\n sigmaColor\n );\n}", + "#version 300 
es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // Blur radius (0\u201320, default ~5)\nuniform float u_float1; // Edge threshold (0\u2013100, default ~30)\nuniform int u_int0; // Step size (0/1 = every pixel, 2+ = skip pixels)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MAX_RADIUS = 20;\nconst float EPSILON = 0.0001;\n\n// Perceptual luminance\nfloat getLuminance(vec3 rgb) {\n return dot(rgb, vec3(0.299, 0.587, 0.114));\n}\n\nvec4 bilateralFilter(vec2 uv, vec2 texelSize, int radius,\n float sigmaSpatial, float sigmaColor)\n{\n vec4 center = texture(u_image0, uv);\n vec3 centerRGB = center.rgb;\n\n float invSpatial2 = -0.5 / (sigmaSpatial * sigmaSpatial);\n float invColor2 = -0.5 / (sigmaColor * sigmaColor + EPSILON);\n\n vec3 sumRGB = vec3(0.0);\n float sumWeight = 0.0;\n\n int step = max(u_int0, 1);\n float radius2 = float(radius * radius);\n\n for (int dy = -MAX_RADIUS; dy <= MAX_RADIUS; dy++) {\n if (dy < -radius || dy > radius) continue;\n if (abs(dy) % step != 0) continue;\n\n for (int dx = -MAX_RADIUS; dx <= MAX_RADIUS; dx++) {\n if (dx < -radius || dx > radius) continue;\n if (abs(dx) % step != 0) continue;\n\n vec2 offset = vec2(float(dx), float(dy));\n float dist2 = dot(offset, offset);\n if (dist2 > radius2) continue;\n\n vec3 sampleRGB = texture(u_image0, uv + offset * texelSize).rgb;\n\n // Spatial Gaussian\n float spatialWeight = exp(dist2 * invSpatial2);\n\n // Perceptual color distance (weighted RGB)\n vec3 diff = sampleRGB - centerRGB;\n float colorDist = dot(diff * diff, vec3(0.299, 0.587, 0.114));\n float colorWeight = exp(colorDist * invColor2);\n\n float w = spatialWeight * colorWeight;\n sumRGB += sampleRGB * w;\n sumWeight += w;\n }\n }\n\n vec3 resultRGB = sumRGB / max(sumWeight, EPSILON);\n return vec4(resultRGB, center.a); // preserve center alpha\n}\n\nvoid main() {\n vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));\n\n float radiusF = clamp(u_float0, 0.0, float(MAX_RADIUS));\n int radius = int(radiusF + 0.5);\n\n if (radius == 0) {\n fragColor = texture(u_image0, v_texCoord);\n return;\n }\n\n // Edge threshold \u2192 color sigma\n // Squared curve for better low-end control\n float t = clamp(u_float1, 0.0, 100.0) / 100.0;\n t *= t;\n float sigmaColor = mix(0.01, 0.5, t);\n\n // Spatial sigma tied to radius\n float sigmaSpatial = max(radiusF * 0.75, 0.5);\n\n fragColor = bilateralFilter(\n v_texCoord,\n texelSize,\n radius,\n sigmaSpatial,\n sigmaColor\n );\n}", "from_input" ] }, @@ -450,9 +450,10 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Blur" + "category": "Image Tools/Blur", + "description": "Applies bilateral (edge-preserving) blur to soften images while retaining detail." 
} ] }, "extra": {} -} +} \ No newline at end of file diff --git a/blueprints/Film Grain.json b/blueprints/Film Grain.json index a680b3ece..d000b713d 100644 --- a/blueprints/Film Grain.json +++ b/blueprints/Film Grain.json @@ -268,7 +268,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform float u_float0; // grain amount [0.0 – 1.0] typical: 0.2–0.8\nuniform float u_float1; // grain size [0.3 – 3.0] lower = finer grain\nuniform float u_float2; // color amount [0.0 – 1.0] 0 = monochrome, 1 = RGB grain\nuniform float u_float3; // luminance bias [0.0 – 1.0] 0 = uniform, 1 = shadows only\nuniform int u_int0; // noise mode [0 or 1] 0 = smooth, 1 = grainy\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\n// High-quality integer hash (pcg-like)\nuint pcg(uint v) {\n uint state = v * 747796405u + 2891336453u;\n uint word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;\n return (word >> 22u) ^ word;\n}\n\n// 2D -> 1D hash input\nuint hash2d(uvec2 p) {\n return pcg(p.x + pcg(p.y));\n}\n\n// Hash to float [0, 1]\nfloat hashf(uvec2 p) {\n return float(hash2d(p)) / float(0xffffffffu);\n}\n\n// Hash to float with offset (for RGB channels)\nfloat hashf(uvec2 p, uint offset) {\n return float(pcg(hash2d(p) + offset)) / float(0xffffffffu);\n}\n\n// Convert uniform [0,1] to roughly Gaussian distribution\n// Using simple approximation: average of multiple samples\nfloat toGaussian(uvec2 p) {\n float sum = hashf(p, 0u) + hashf(p, 1u) + hashf(p, 2u) + hashf(p, 3u);\n return (sum - 2.0) * 0.7; // Centered, scaled\n}\n\nfloat toGaussian(uvec2 p, uint offset) {\n float sum = hashf(p, offset) + hashf(p, offset + 1u) \n + hashf(p, offset + 2u) + hashf(p, offset + 3u);\n return (sum - 2.0) * 0.7;\n}\n\n// Smooth noise with better interpolation\nfloat smoothNoise(vec2 p) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n // Quintic interpolation (less banding than cubic)\n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui);\n float b = toGaussian(ui + uvec2(1u, 0u));\n float c = toGaussian(ui + uvec2(0u, 1u));\n float d = toGaussian(ui + uvec2(1u, 1u));\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nfloat smoothNoise(vec2 p, uint offset) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui, offset);\n float b = toGaussian(ui + uvec2(1u, 0u), offset);\n float c = toGaussian(ui + uvec2(0u, 1u), offset);\n float d = toGaussian(ui + uvec2(1u, 1u), offset);\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nvoid main() {\n vec4 color = texture(u_image0, v_texCoord);\n \n // Luminance (Rec.709)\n float luma = dot(color.rgb, vec3(0.2126, 0.7152, 0.0722));\n \n // Grain UV (resolution-independent)\n vec2 grainUV = v_texCoord * u_resolution / max(u_float1, 0.01);\n uvec2 grainPixel = uvec2(grainUV);\n \n float g;\n vec3 grainRGB;\n \n if (u_int0 == 1) {\n // Grainy mode: pure hash noise (no interpolation = no banding)\n g = toGaussian(grainPixel);\n grainRGB = vec3(\n toGaussian(grainPixel, 100u),\n toGaussian(grainPixel, 200u),\n toGaussian(grainPixel, 300u)\n );\n } else {\n // Smooth mode: interpolated with quintic curve\n g = smoothNoise(grainUV);\n grainRGB = vec3(\n smoothNoise(grainUV, 100u),\n smoothNoise(grainUV, 200u),\n smoothNoise(grainUV, 300u)\n );\n }\n \n // Luminance weighting (less grain in 
highlights)\n float lumWeight = mix(1.0, 1.0 - luma, clamp(u_float3, 0.0, 1.0));\n \n // Strength\n float strength = u_float0 * 0.15;\n \n // Color vs monochrome grain\n vec3 grainColor = mix(vec3(g), grainRGB, clamp(u_float2, 0.0, 1.0));\n \n color.rgb += grainColor * strength * lumWeight;\n fragColor0 = vec4(clamp(color.rgb, 0.0, 1.0), color.a);\n}\n", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform float u_float0; // grain amount [0.0 \u2013 1.0] typical: 0.2\u20130.8\nuniform float u_float1; // grain size [0.3 \u2013 3.0] lower = finer grain\nuniform float u_float2; // color amount [0.0 \u2013 1.0] 0 = monochrome, 1 = RGB grain\nuniform float u_float3; // luminance bias [0.0 \u2013 1.0] 0 = uniform, 1 = shadows only\nuniform int u_int0; // noise mode [0 or 1] 0 = smooth, 1 = grainy\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\n// High-quality integer hash (pcg-like)\nuint pcg(uint v) {\n uint state = v * 747796405u + 2891336453u;\n uint word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;\n return (word >> 22u) ^ word;\n}\n\n// 2D -> 1D hash input\nuint hash2d(uvec2 p) {\n return pcg(p.x + pcg(p.y));\n}\n\n// Hash to float [0, 1]\nfloat hashf(uvec2 p) {\n return float(hash2d(p)) / float(0xffffffffu);\n}\n\n// Hash to float with offset (for RGB channels)\nfloat hashf(uvec2 p, uint offset) {\n return float(pcg(hash2d(p) + offset)) / float(0xffffffffu);\n}\n\n// Convert uniform [0,1] to roughly Gaussian distribution\n// Using simple approximation: average of multiple samples\nfloat toGaussian(uvec2 p) {\n float sum = hashf(p, 0u) + hashf(p, 1u) + hashf(p, 2u) + hashf(p, 3u);\n return (sum - 2.0) * 0.7; // Centered, scaled\n}\n\nfloat toGaussian(uvec2 p, uint offset) {\n float sum = hashf(p, offset) + hashf(p, offset + 1u) \n + hashf(p, offset + 2u) + hashf(p, offset + 3u);\n return (sum - 2.0) * 0.7;\n}\n\n// Smooth noise with better interpolation\nfloat smoothNoise(vec2 p) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n // Quintic interpolation (less banding than cubic)\n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui);\n float b = toGaussian(ui + uvec2(1u, 0u));\n float c = toGaussian(ui + uvec2(0u, 1u));\n float d = toGaussian(ui + uvec2(1u, 1u));\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nfloat smoothNoise(vec2 p, uint offset) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui, offset);\n float b = toGaussian(ui + uvec2(1u, 0u), offset);\n float c = toGaussian(ui + uvec2(0u, 1u), offset);\n float d = toGaussian(ui + uvec2(1u, 1u), offset);\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nvoid main() {\n vec4 color = texture(u_image0, v_texCoord);\n \n // Luminance (Rec.709)\n float luma = dot(color.rgb, vec3(0.2126, 0.7152, 0.0722));\n \n // Grain UV (resolution-independent)\n vec2 grainUV = v_texCoord * u_resolution / max(u_float1, 0.01);\n uvec2 grainPixel = uvec2(grainUV);\n \n float g;\n vec3 grainRGB;\n \n if (u_int0 == 1) {\n // Grainy mode: pure hash noise (no interpolation = no banding)\n g = toGaussian(grainPixel);\n grainRGB = vec3(\n toGaussian(grainPixel, 100u),\n toGaussian(grainPixel, 200u),\n toGaussian(grainPixel, 300u)\n );\n } else {\n // Smooth mode: interpolated with quintic curve\n g = smoothNoise(grainUV);\n grainRGB = vec3(\n smoothNoise(grainUV, 100u),\n smoothNoise(grainUV, 200u),\n 
smoothNoise(grainUV, 300u)\n );\n }\n \n // Luminance weighting (less grain in highlights)\n float lumWeight = mix(1.0, 1.0 - luma, clamp(u_float3, 0.0, 1.0));\n \n // Strength\n float strength = u_float0 * 0.15;\n \n // Color vs monochrome grain\n vec3 grainColor = mix(vec3(g), grainRGB, clamp(u_float2, 0.0, 1.0));\n \n color.rgb += grainColor * strength * lumWeight;\n fragColor0 = vec4(clamp(color.rgb, 0.0, 1.0), color.a);\n}\n", "from_input" ] }, @@ -580,8 +580,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Adds procedural film grain texture for a cinematic look via GPU fragment shader." } ] } -} +} \ No newline at end of file diff --git a/blueprints/First-Last-Frame to Video (LTX-2.3).json b/blueprints/First-Last-Frame to Video (LTX-2.3).json index 8ec9ed61a..f509aefe0 100644 --- a/blueprints/First-Last-Frame to Video (LTX-2.3).json +++ b/blueprints/First-Last-Frame to Video (LTX-2.3).json @@ -3350,7 +3350,8 @@ } ], "extra": {}, - "category": "Video generation and editing/First-Last-Frame to Video" + "category": "Video generation and editing/First-Last-Frame to Video", + "description": "Generates a video interpolating between first and last keyframes using LTX-2.3." } ] }, diff --git a/blueprints/Glow.json b/blueprints/Glow.json index 1dafb2d35..2bbfdee51 100644 --- a/blueprints/Glow.json +++ b/blueprints/Glow.json @@ -575,8 +575,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Adds a glow/bloom effect around bright image areas via GPU fragment shader." } ] } -} +} \ No newline at end of file diff --git a/blueprints/Hue and Saturation.json b/blueprints/Hue and Saturation.json index 1a2df8937..cddf0154a 100644 --- a/blueprints/Hue and Saturation.json +++ b/blueprints/Hue and Saturation.json @@ -752,8 +752,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Adjusts hue, saturation, and lightness of an image using a real-time GPU fragment shader." } ] } -} +} \ No newline at end of file diff --git a/blueprints/Image Blur.json b/blueprints/Image Blur.json index 3c7a784b0..fe3c3dfec 100644 --- a/blueprints/Image Blur.json +++ b/blueprints/Image Blur.json @@ -374,8 +374,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Blur" + "category": "Image Tools/Blur", + "description": "Applies Gaussian blur to soften an image or simulate depth-of-field effects." } ] } -} +} \ No newline at end of file diff --git a/blueprints/Image Captioning (gemini).json b/blueprints/Image Captioning (gemini).json index 98cfb8999..f515731bf 100644 --- a/blueprints/Image Captioning (gemini).json +++ b/blueprints/Image Captioning (gemini).json @@ -310,8 +310,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Text generation/Image Captioning" + "category": "Text generation/Image Captioning", + "description": "Generates descriptive captions for images using Google Gemini's multimodal LLM." 
} ] } -} +} \ No newline at end of file diff --git a/blueprints/Image Channels.json b/blueprints/Image Channels.json index 9c7b675b2..b6fdff5be 100644 --- a/blueprints/Image Channels.json +++ b/blueprints/Image Channels.json @@ -315,8 +315,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Manipulates individual RGBA channels for masking, compositing, and channel effects." } ] } -} +} \ No newline at end of file diff --git a/blueprints/Image Edit (FireRed Image Edit 1.1).json b/blueprints/Image Edit (FireRed Image Edit 1.1).json index c34246ce6..14310353c 100644 --- a/blueprints/Image Edit (FireRed Image Edit 1.1).json +++ b/blueprints/Image Edit (FireRed Image Edit 1.1).json @@ -2138,7 +2138,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Edit image" + "category": "Image generation and editing/Edit image", + "description": "Edits images via text instructions using FireRed Image Edit 1.1, a diffusion-based instruction-following editing model." } ] }, diff --git a/blueprints/Image Edit (Flux.2 Klein 4B).json b/blueprints/Image Edit (Flux.2 Klein 4B).json index 6f2f7dc01..da50ef0ba 100644 --- a/blueprints/Image Edit (Flux.2 Klein 4B).json +++ b/blueprints/Image Edit (Flux.2 Klein 4B).json @@ -1472,7 +1472,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Edit image" + "category": "Image generation and editing/Edit image", + "description": "Edits images via text instructions using FLUX.2 [klein] 4B, supporting both T2I and image editing." }, { "id": "6007e698-2ebd-4917-84d8-299b35d7b7ab", @@ -1821,7 +1822,8 @@ ], "extra": { "workflowRendererVersion": "LG" - } + }, + "description": "Applies reference image conditioning for style/identity transfer (Flux.2 Klein 4B)." } ] }, diff --git a/blueprints/Image Edit (LongCat Image Edit).json b/blueprints/Image Edit (LongCat Image Edit).json index 5b4eb18f0..de1c155a2 100644 --- a/blueprints/Image Edit (LongCat Image Edit).json +++ b/blueprints/Image Edit (LongCat Image Edit).json @@ -1417,7 +1417,8 @@ } ], "extra": {}, - "category": "Image generation and editing/Edit image" + "category": "Image generation and editing/Edit image", + "description": "Edits images via text instructions using LongCat Image Edit, an instruction-following image editing diffusion model." } ] }, diff --git a/blueprints/Image Edit (Qwen 2511).json b/blueprints/Image Edit (Qwen 2511).json index 582171fa0..d37fab1aa 100644 --- a/blueprints/Image Edit (Qwen 2511).json +++ b/blueprints/Image Edit (Qwen 2511).json @@ -1468,7 +1468,8 @@ "VHS_MetadataImage": true, "VHS_KeepIntermediate": true }, - "category": "Image generation and editing/Edit image" + "category": "Image generation and editing/Edit image", + "description": "Edits images via text instructions using Qwen-Image-Edit-2511 with improved character consistency and integrated LoRA." 
} ] }, @@ -1489,4 +1490,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Image Inpainting (Flux.1 Fill Dev).json b/blueprints/Image Inpainting (Flux.1 Fill Dev).json index d40d63594..891e75ed0 100644 --- a/blueprints/Image Inpainting (Flux.1 Fill Dev).json +++ b/blueprints/Image Inpainting (Flux.1 Fill Dev).json @@ -1188,7 +1188,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Inpaint image" + "category": "Image generation and editing/Inpaint image", + "description": "Inpaints masked image regions using Flux.1 fill [dev], BFL's inpainting/outpainting model." } ] }, diff --git a/blueprints/Image Inpainting (Qwen-image).json b/blueprints/Image Inpainting (Qwen-image).json index 95b2909fa..a06d57e19 100644 --- a/blueprints/Image Inpainting (Qwen-image).json +++ b/blueprints/Image Inpainting (Qwen-image).json @@ -1548,7 +1548,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Inpaint image" + "category": "Image generation and editing/Inpaint image", + "description": "Inpaints masked regions using Qwen-Image, extending its multilingual text rendering to inpainting tasks." }, { "id": "56a1f603-fbd2-40ed-94ef-c9ecbd96aca8", @@ -1907,7 +1908,8 @@ ], "extra": { "workflowRendererVersion": "LG" - } + }, + "description": "Expands and softens mask edges to reduce visible seams after image processing." } ] }, diff --git a/blueprints/Image Levels.json b/blueprints/Image Levels.json index ef256a1aa..1a1b18932 100644 --- a/blueprints/Image Levels.json +++ b/blueprints/Image Levels.json @@ -742,9 +742,10 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Color adjust" + "category": "Image Tools/Color adjust", + "description": "Adjusts black point, white point, and gamma for tonal range control via GPU shader." } ] }, "extra": {} -} +} \ No newline at end of file diff --git a/blueprints/Image Outpainting (Qwen-Image).json b/blueprints/Image Outpainting (Qwen-Image).json index 218fdc775..6c07227c0 100644 --- a/blueprints/Image Outpainting (Qwen-Image).json +++ b/blueprints/Image Outpainting (Qwen-Image).json @@ -1919,7 +1919,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Outpaint image" + "category": "Image generation and editing/Outpaint image", + "description": "Outpaints beyond image boundaries using Qwen-Image's outpainting capabilities." }, { "id": "f93c215e-c393-460e-9534-ed2c3d8a652e", @@ -2278,7 +2279,8 @@ ], "extra": { "workflowRendererVersion": "LG" - } + }, + "description": "Expands and softens mask edges to reduce visible seams after image processing." }, { "id": "2a4b2cc0-db37-4302-a067-da392f38f06b", @@ -2733,7 +2735,8 @@ ], "extra": { "workflowRendererVersion": "LG" - } + }, + "description": "Scales both image and mask together while preserving alignment for editing workflows." } ] }, diff --git a/blueprints/Image Upscale(Z-image-Turbo).json b/blueprints/Image Upscale(Z-image-Turbo).json index 0d2b6e240..baa48f53e 100644 --- a/blueprints/Image Upscale(Z-image-Turbo).json +++ b/blueprints/Image Upscale(Z-image-Turbo).json @@ -1302,7 +1302,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Enhance" + "category": "Image generation and editing/Enhance", + "description": "Upscales images to higher resolution using Z-Image-Turbo." 
} ] }, @@ -1311,4 +1312,4 @@ "workflowRendererVersion": "LG" }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Image to Depth Map (Lotus).json b/blueprints/Image to Depth Map (Lotus).json index 089f2cd42..036b1834f 100644 --- a/blueprints/Image to Depth Map (Lotus).json +++ b/blueprints/Image to Depth Map (Lotus).json @@ -948,7 +948,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Depth to image" + "category": "Image generation and editing/Depth to image", + "description": "Estimates a monocular depth map from an input image using the Lotus depth estimation model." } ] }, @@ -964,4 +965,4 @@ "workflowRendererVersion": "LG" }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Image to Layers(Qwen-Image-Layered).json b/blueprints/Image to Layers(Qwen-Image-Layered).json index 8a525e7a5..7b44f0563 100644 --- a/blueprints/Image to Layers(Qwen-Image-Layered).json +++ b/blueprints/Image to Layers(Qwen-Image-Layered).json @@ -1586,7 +1586,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Image to layers" + "category": "Image generation and editing/Image to layers", + "description": "Decomposes an image into variable-resolution RGBA layers for independent editing using Qwen-Image-Layered." } ] }, diff --git a/blueprints/Image to Model (Hunyuan3d 2.1).json b/blueprints/Image to Model (Hunyuan3d 2.1).json index 4705603a8..818bb71ba 100644 --- a/blueprints/Image to Model (Hunyuan3d 2.1).json +++ b/blueprints/Image to Model (Hunyuan3d 2.1).json @@ -765,7 +765,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "3D/Image to 3D Model" + "category": "3D/Image to 3D Model", + "description": "Generates 3D mesh models from a single input image using Hunyuan3D 2.0/2.1." } ] }, @@ -781,4 +782,4 @@ "workflowRendererVersion": "LG" }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Image to Video (LTX-2.3).json b/blueprints/Image to Video (LTX-2.3).json index 86a601130..3db524ea0 100644 --- a/blueprints/Image to Video (LTX-2.3).json +++ b/blueprints/Image to Video (LTX-2.3).json @@ -4223,7 +4223,8 @@ "extra": { "workflowRendererVersion": "Vue-corrected" }, - "category": "Video generation and editing/Image to video" + "category": "Video generation and editing/Image to video", + "description": "Generates video from a single input image using LTX-2.3." 
} ] }, diff --git a/blueprints/Image to Video (Wan 2.2).json b/blueprints/Image to Video (Wan 2.2).json index a8dafd3c9..de9093d6b 100644 --- a/blueprints/Image to Video (Wan 2.2).json +++ b/blueprints/Image to Video (Wan 2.2).json @@ -825,7 +825,7 @@ } }, "widgets_values": [ - "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" + "\u8272\u8c03\u8273\u4e3d\uff0c\u8fc7\u66dd\uff0c\u9759\u6001\uff0c\u7ec6\u8282\u6a21\u7cca\u4e0d\u6e05\uff0c\u5b57\u5e55\uff0c\u98ce\u683c\uff0c\u4f5c\u54c1\uff0c\u753b\u4f5c\uff0c\u753b\u9762\uff0c\u9759\u6b62\uff0c\u6574\u4f53\u53d1\u7070\uff0c\u6700\u5dee\u8d28\u91cf\uff0c\u4f4e\u8d28\u91cf\uff0cJPEG\u538b\u7f29\u6b8b\u7559\uff0c\u4e11\u964b\u7684\uff0c\u6b8b\u7f3a\u7684\uff0c\u591a\u4f59\u7684\u624b\u6307\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u624b\u90e8\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u8138\u90e8\uff0c\u7578\u5f62\u7684\uff0c\u6bc1\u5bb9\u7684\uff0c\u5f62\u6001\u7578\u5f62\u7684\u80a2\u4f53\uff0c\u624b\u6307\u878d\u5408\uff0c\u9759\u6b62\u4e0d\u52a8\u7684\u753b\u9762\uff0c\u6742\u4e71\u7684\u80cc\u666f\uff0c\u4e09\u6761\u817f\uff0c\u80cc\u666f\u4eba\u5f88\u591a\uff0c\u5012\u7740\u8d70" ], "color": "#322", "bgcolor": "#533" @@ -1525,7 +1525,7 @@ } }, "widgets_values": [ - "## GPU:RTX4090D 24GB\n\n| Model | Size |VRAM Usage | 1st Generation | 2nd Generation |\n|---------------------|-------|-----------|---------------|-----------------|\n| fp8_scaled |640*640| 84% | ≈ 536s | ≈ 513s |\n| fp8_scaled + 4steps LoRA | 640*640 | 83% | ≈ 97s | ≈ 71s |" + "## GPU:RTX4090D 24GB\n\n| Model | Size |VRAM Usage | 1st Generation | 2nd Generation |\n|---------------------|-------|-----------|---------------|-----------------|\n| fp8_scaled |640*640| 84% | \u2248 536s | \u2248 513s |\n| fp8_scaled + 4steps LoRA | 640*640 | 83% | \u2248 97s | \u2248 71s |" ], "color": "#222", "bgcolor": "#000" @@ -1555,7 +1555,7 @@ } }, "widgets_values": [ - "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model**\n- [wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors)\n- [wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors)\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile save location\n\n```\nComfyUI/\n├───📂 models/\n│ ├───📂 diffusion_models/\n│ │ ├─── wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors\n│ │ └─── wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors\n│ ├───📂 loras/\n│ │ ├─── 
wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors\n│ │ └─── wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors\n│ ├───📂 text_encoders/\n│ │ └─── umt5_xxl_fp8_e4m3fn_scaled.safetensors \n│ └───📂 vae/\n│ └── wan_2.1_vae.safetensors\n```\n" + "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model**\n- [wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors)\n- [wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors)\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile save location\n\n```\nComfyUI/\n\u251c\u2500\u2500\u2500\ud83d\udcc2 models/\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 diffusion_models/\n\u2502 \u2502 \u251c\u2500\u2500\u2500 wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500 wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 loras/\n\u2502 \u2502 \u251c\u2500\u2500\u2500 wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500 wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 text_encoders/\n\u2502 \u2502 \u2514\u2500\u2500\u2500 umt5_xxl_fp8_e4m3fn_scaled.safetensors \n\u2502 \u2514\u2500\u2500\u2500\ud83d\udcc2 vae/\n\u2502 \u2514\u2500\u2500 wan_2.1_vae.safetensors\n```\n" ], "color": "#222", "bgcolor": "#000" @@ -2027,7 +2027,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video generation and editing/Image to video" + "category": "Video generation and editing/Image to video", + "description": "Generates video from an image and text prompt using Wan2.2, supporting T2V and I2V." } ] }, @@ -2049,4 +2050,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Pose to Image (Z-Image-Turbo).json b/blueprints/Pose to Image (Z-Image-Turbo).json index a55410ba4..f89eec686 100644 --- a/blueprints/Pose to Image (Z-Image-Turbo).json +++ b/blueprints/Pose to Image (Z-Image-Turbo).json @@ -1298,7 +1298,8 @@ "VHS_MetadataImage": true, "VHS_KeepIntermediate": true }, - "category": "Image generation and editing/Pose to image" + "category": "Image generation and editing/Pose to image", + "description": "Generates an image from pose keypoints using Z-Image-Turbo with text conditioning."
} ] }, @@ -1319,4 +1320,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Pose to Video (LTX 2.0).json b/blueprints/Pose to Video (LTX 2.0).json index 580900bc0..1ce49351a 100644 --- a/blueprints/Pose to Video (LTX 2.0).json +++ b/blueprints/Pose to Video (LTX 2.0).json @@ -3870,7 +3870,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video generation and editing/Pose to video" + "category": "Video generation and editing/Pose to video", + "description": "Generates video from pose reference frames using LTX-2, with optional synchronized audio." } ] }, diff --git a/blueprints/Prompt Enhance.json b/blueprints/Prompt Enhance.json index 5e57548ff..e260b1203 100644 --- a/blueprints/Prompt Enhance.json +++ b/blueprints/Prompt Enhance.json @@ -270,9 +270,10 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Text generation/Prompt enhance" + "category": "Text generation/Prompt enhance", + "description": "Expands short text prompts into detailed descriptions using a text generation model for better generation quality." } ] }, "extra": {} -} +} \ No newline at end of file diff --git a/blueprints/Sharpen.json b/blueprints/Sharpen.json index f332400fd..9f56f8ca6 100644 --- a/blueprints/Sharpen.json +++ b/blueprints/Sharpen.json @@ -267,7 +267,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // strength [0.0 – 2.0] typical: 0.3–1.0\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));\n \n // Sample center and neighbors\n vec4 center = texture(u_image0, v_texCoord);\n vec4 top = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));\n vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0, texel.y));\n vec4 left = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));\n vec4 right = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));\n \n // Edge enhancement (Laplacian)\n vec4 edges = center * 4.0 - top - bottom - left - right;\n \n // Add edges back scaled by strength\n vec4 sharpened = center + edges * u_float0;\n \n fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);\n}", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // strength [0.0 \u2013 2.0] typical: 0.3\u20131.0\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));\n \n // Sample center and neighbors\n vec4 center = texture(u_image0, v_texCoord);\n vec4 top = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));\n vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0, texel.y));\n vec4 left = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));\n vec4 right = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));\n \n // Edge enhancement (Laplacian)\n vec4 edges = center * 4.0 - top - bottom - left - right;\n \n // Add edges back scaled by strength\n vec4 sharpened = center + edges * u_float0;\n \n fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);\n}", "from_input" ] } @@ -302,8 +302,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Sharpen" + "category": "Image Tools/Sharpen", + "description": "Sharpens image details using a GPU fragment shader for enhanced clarity." 
} ] } -} +} \ No newline at end of file diff --git a/blueprints/Text to Audio (ACE-Step 1.5).json b/blueprints/Text to Audio (ACE-Step 1.5).json index 206cf16be..4c8789fee 100644 --- a/blueprints/Text to Audio (ACE-Step 1.5).json +++ b/blueprints/Text to Audio (ACE-Step 1.5).json @@ -1502,7 +1502,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Audio/Music generation" + "category": "Audio/Music generation", + "description": "Generates audio/music from text prompts using ACE-Step 1.5, a diffusion-based audio generation model." } ] }, @@ -1518,4 +1519,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Text to Image (Flux.1 Dev).json b/blueprints/Text to Image (Flux.1 Dev).json index 04c3cb95a..95cd23d04 100644 --- a/blueprints/Text to Image (Flux.1 Dev).json +++ b/blueprints/Text to Image (Flux.1 Dev).json @@ -1029,7 +1029,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Text to image" + "category": "Image generation and editing/Text to image", + "description": "Generates images from text prompts using Flux.1 [dev], BFL's 12B diffusion model." } ] }, diff --git a/blueprints/Text to Image (Flux.1 Krea Dev).json b/blueprints/Text to Image (Flux.1 Krea Dev).json index fe4db1cfc..e8a56a31b 100644 --- a/blueprints/Text to Image (Flux.1 Krea Dev).json +++ b/blueprints/Text to Image (Flux.1 Krea Dev).json @@ -1023,7 +1023,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Text to image" + "category": "Image generation and editing/Text to image", + "description": "Generates images from text prompts using Flux.1 Krea Dev, a BFL \u00d7 Krea collaboration variant." } ] }, diff --git a/blueprints/Text to Image (NetaYume Lumina).json b/blueprints/Text to Image (NetaYume Lumina).json index 394ad1608..abad72b3a 100644 --- a/blueprints/Text to Image (NetaYume Lumina).json +++ b/blueprints/Text to Image (NetaYume Lumina).json @@ -1104,7 +1104,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Text to image" + "category": "Image generation and editing/Text to image", + "description": "Generates images from text prompts using NetaYume Lumina, a Lumina-Next variant fine-tuned for anime-style and illustration generation." }, { "id": "a07fdf06-1bda-4dac-bdbd-63ee8ebca1c9", @@ -1458,7 +1459,8 @@ ], "extra": { "workflowRendererVersion": "LG" - } + }, + "description": "Encodes a negative text prompt via CLIP for classifier-free guidance in anime-style generation (NetaYume Lumina)." 
} ] }, diff --git a/blueprints/Text to Image (Qwen-Image 2512).json b/blueprints/Text to Image (Qwen-Image 2512).json index f52ea2ef2..9e666de95 100644 --- a/blueprints/Text to Image (Qwen-Image 2512).json +++ b/blueprints/Text to Image (Qwen-Image 2512).json @@ -751,7 +751,7 @@ "secondTabWidth": 65 }, "widgets_values": [ - "低分辨率,低画质,肢体畸形,手指畸形,画面过饱和,蜡像感,人脸无细节,过度光滑,画面具有AI感。构图混乱。文字模糊,扭曲" + "\u4f4e\u5206\u8fa8\u7387\uff0c\u4f4e\u753b\u8d28\uff0c\u80a2\u4f53\u7578\u5f62\uff0c\u624b\u6307\u7578\u5f62\uff0c\u753b\u9762\u8fc7\u9971\u548c\uff0c\u8721\u50cf\u611f\uff0c\u4eba\u8138\u65e0\u7ec6\u8282\uff0c\u8fc7\u5ea6\u5149\u6ed1\uff0c\u753b\u9762\u5177\u6709AI\u611f\u3002\u6784\u56fe\u6df7\u4e71\u3002\u6587\u5b57\u6a21\u7cca\uff0c\u626d\u66f2" ], "color": "#322", "bgcolor": "#533" @@ -1941,7 +1941,8 @@ "extra": { "workflowRendererVersion": "Vue-corrected" }, - "category": "Image generation and editing/Text to image" + "category": "Image generation and editing/Text to image", + "description": "Generates images from text prompts using Qwen-Image-2512, with enhanced human realism and finer natural detail over the base version." } ] }, diff --git a/blueprints/Text to Image (Qwen-Image).json b/blueprints/Text to Image (Qwen-Image).json index 70b4b44b3..e78d5a962 100644 --- a/blueprints/Text to Image (Qwen-Image).json +++ b/blueprints/Text to Image (Qwen-Image).json @@ -1873,7 +1873,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Text to image" + "category": "Image generation and editing/Text to image", + "description": "Generates images from text prompts using Qwen-Image, Alibaba's 20B MMDiT model with excellent multilingual text rendering." } ] }, diff --git a/blueprints/Text to Image (Z-Image-Turbo).json b/blueprints/Text to Image (Z-Image-Turbo).json index 6aa80e327..913aaa303 100644 --- a/blueprints/Text to Image (Z-Image-Turbo).json +++ b/blueprints/Text to Image (Z-Image-Turbo).json @@ -1054,7 +1054,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image generation and editing/Text to image" + "category": "Image generation and editing/Text to image", + "description": "Generates images from text prompts using Z-Image-Turbo, Alibaba's distilled 6B DiT model." } ] }, @@ -1075,4 +1076,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Text to Video (LTX-2.3).json b/blueprints/Text to Video (LTX-2.3).json index ff9bc6ccf..f44a216dd 100644 --- a/blueprints/Text to Video (LTX-2.3).json +++ b/blueprints/Text to Video (LTX-2.3).json @@ -4286,7 +4286,8 @@ "extra": { "workflowRendererVersion": "Vue-corrected" }, - "category": "Video generation and editing/Text to video" + "category": "Video generation and editing/Text to video", + "description": "Generates video from text prompts using LTX-2.3, Lightricks' video diffusion model." 
} ] }, diff --git a/blueprints/Text to Video (Wan 2.2).json b/blueprints/Text to Video (Wan 2.2).json index 0ce485b67..d519461eb 100644 --- a/blueprints/Text to Video (Wan 2.2).json +++ b/blueprints/Text to Video (Wan 2.2).json @@ -1160,7 +1160,7 @@ "title": "Model Links", "properties": {}, "widgets_values": [ - "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n) | [教程](https://docs.comfy.org/zh-CN/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model** \n- [wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors)\n- [wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors)\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile save location\n\n```\nComfyUI/\n├───📂 models/\n│ ├───📂 diffusion_models/\n│ │ ├─── wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors\n│ │ └─── wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors\n│ ├───📂 loras/\n│ │ ├───wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors\n│ │ └───wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors\n│ ├───📂 text_encoders/\n│ │ └─── umt5_xxl_fp8_e4m3fn_scaled.safetensors \n│ └───📂 vae/\n│ └── wan_2.1_vae.safetensors\n```\n" + "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n) | [\u6559\u7a0b](https://docs.comfy.org/zh-CN/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model** \n- [wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors)\n- [wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors)\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile 
save location\n\n```\nComfyUI/\n\u251c\u2500\u2500\u2500\ud83d\udcc2 models/\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 diffusion_models/\n\u2502 \u2502 \u251c\u2500\u2500\u2500 wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500 wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 loras/\n\u2502 \u2502 \u251c\u2500\u2500\u2500wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 text_encoders/\n\u2502 \u2502 \u2514\u2500\u2500\u2500 umt5_xxl_fp8_e4m3fn_scaled.safetensors \n\u2502 \u2514\u2500\u2500\u2500\ud83d\udcc2 vae/\n\u2502 \u2514\u2500\u2500 wan_2.1_vae.safetensors\n```\n" ], "color": "#222", "bgcolor": "#000" @@ -1263,7 +1263,7 @@ "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ - "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走,裸露,NSFW" + "\u8272\u8c03\u8273\u4e3d\uff0c\u8fc7\u66dd\uff0c\u9759\u6001\uff0c\u7ec6\u8282\u6a21\u7cca\u4e0d\u6e05\uff0c\u5b57\u5e55\uff0c\u98ce\u683c\uff0c\u4f5c\u54c1\uff0c\u753b\u4f5c\uff0c\u753b\u9762\uff0c\u9759\u6b62\uff0c\u6574\u4f53\u53d1\u7070\uff0c\u6700\u5dee\u8d28\u91cf\uff0c\u4f4e\u8d28\u91cf\uff0cJPEG\u538b\u7f29\u6b8b\u7559\uff0c\u4e11\u964b\u7684\uff0c\u6b8b\u7f3a\u7684\uff0c\u591a\u4f59\u7684\u624b\u6307\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u624b\u90e8\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u8138\u90e8\uff0c\u7578\u5f62\u7684\uff0c\u6bc1\u5bb9\u7684\uff0c\u5f62\u6001\u7578\u5f62\u7684\u80a2\u4f53\uff0c\u624b\u6307\u878d\u5408\uff0c\u9759\u6b62\u4e0d\u52a8\u7684\u753b\u9762\uff0c\u6742\u4e71\u7684\u80cc\u666f\uff0c\u4e09\u6761\u817f\uff0c\u80cc\u666f\u4eba\u5f88\u591a\uff0c\u5012\u7740\u8d70\uff0c\u88f8\u9732\uff0cNSFW" ], "color": "#322", "bgcolor": "#533" @@ -1572,7 +1572,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video generation and editing/Text to video" + "category": "Video generation and editing/Text to video", + "description": "Generates video from text prompts using Wan2.2, Alibaba's diffusion video model." } ] }, @@ -1586,4 +1587,4 @@ "VHS_KeepIntermediate": true }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Unsharp Mask.json b/blueprints/Unsharp Mask.json index 137acaa43..79a4c954f 100644 --- a/blueprints/Unsharp Mask.json +++ b/blueprints/Unsharp Mask.json @@ -434,8 +434,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Image Tools/Sharpen" + "category": "Image Tools/Sharpen", + "description": "Enhances edge contrast via unsharp masking for a sharper image appearance." } ] } -} +} \ No newline at end of file diff --git a/blueprints/Video Captioning (Gemini).json b/blueprints/Video Captioning (Gemini).json index ea6dc8bee..688371483 100644 --- a/blueprints/Video Captioning (Gemini).json +++ b/blueprints/Video Captioning (Gemini).json @@ -307,8 +307,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Text generation/Video Captioning" + "category": "Text generation/Video Captioning", + "description": "Generates descriptive captions for video input using Google Gemini's multimodal LLM." 
} ] } -} +} \ No newline at end of file diff --git a/blueprints/Video Inpaint(Wan2.1 VACE).json b/blueprints/Video Inpaint(Wan2.1 VACE).json index f404e6773..dbabcd850 100644 --- a/blueprints/Video Inpaint(Wan2.1 VACE).json +++ b/blueprints/Video Inpaint(Wan2.1 VACE).json @@ -1116,7 +1116,7 @@ "title": "About Video Size", "properties": {}, "widgets_values": [ - "| Model | 480P | 720P |\n| ------------------------------------------------------------ | ---- | ---- |\n| [VACE-1.3B](https://huggingface.co/Wan-AI/Wan2.1-VACE-1.3B) | ✅ | ❌ |\n| [VACE-14B](https://huggingface.co/Wan-AI/Wan2.1-VACE-14B) | ✅ | ✅ |" + "| Model | 480P | 720P |\n| ------------------------------------------------------------ | ---- | ---- |\n| [VACE-1.3B](https://huggingface.co/Wan-AI/Wan2.1-VACE-1.3B) | \u2705 | \u274c |\n| [VACE-14B](https://huggingface.co/Wan-AI/Wan2.1-VACE-14B) | \u2705 | \u2705 |" ], "color": "#432", "bgcolor": "#000" @@ -1516,7 +1516,7 @@ "widget_ue_connectable": {} }, "widgets_values": [ - "Currently, it's difficult to perfectly draw dynamic masks for different frames using only core nodes. However, to avoid requiring users to install additional custom nodes, our templates only use core nodes. You can refer to this implementation idea to achieve video inpainting.\n\nYou can use KJNode’s Points Editor and Sam2Segmentation to create some dynamic mask functions.\n\nCustom node links:\n- [ComfyUI-KJNodes](https://github.com/kijai/ComfyUI-KJNodes)\n- [ComfyUI-segment-anything-2](https://github.com/kijai/ComfyUI-segment-anything-2)" + "Currently, it's difficult to perfectly draw dynamic masks for different frames using only core nodes. However, to avoid requiring users to install additional custom nodes, our templates only use core nodes. You can refer to this implementation idea to achieve video inpainting.\n\nYou can use KJNodes\u2019 Points Editor and Sam2Segmentation to create dynamic masks.\n\nCustom node links:\n- [ComfyUI-KJNodes](https://github.com/kijai/ComfyUI-KJNodes)\n- [ComfyUI-segment-anything-2](https://github.com/kijai/ComfyUI-segment-anything-2)" ], "color": "#432", "bgcolor": "#000" @@ -1578,7 +1578,7 @@ "widget_ue_connectable": {} }, "widgets_values": [ - "过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走,过曝," + "\u8fc7\u66dd\uff0c\u9759\u6001\uff0c\u7ec6\u8282\u6a21\u7cca\u4e0d\u6e05\uff0c\u5b57\u5e55\uff0c\u98ce\u683c\uff0c\u4f5c\u54c1\uff0c\u753b\u4f5c\uff0c\u753b\u9762\uff0c\u9759\u6b62\uff0c\u6574\u4f53\u53d1\u7070\uff0c\u6700\u5dee\u8d28\u91cf\uff0c\u4f4e\u8d28\u91cf\uff0cJPEG\u538b\u7f29\u6b8b\u7559\uff0c\u4e11\u964b\u7684\uff0c\u6b8b\u7f3a\u7684\uff0c\u591a\u4f59\u7684\u624b\u6307\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u624b\u90e8\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u8138\u90e8\uff0c\u7578\u5f62\u7684\uff0c\u6bc1\u5bb9\u7684\uff0c\u5f62\u6001\u7578\u5f62\u7684\u80a2\u4f53\uff0c\u624b\u6307\u878d\u5408\uff0c\u9759\u6b62\u4e0d\u52a8\u7684\u753b\u9762\uff0c\u6742\u4e71\u7684\u80cc\u666f\uff0c\u4e09\u6761\u817f\uff0c\u80cc\u666f\u4eba\u5f88\u591a\uff0c\u5012\u7740\u8d70,\u8fc7\u66dd\uff0c" ], "color": "#223", "bgcolor": "#335" @@ -2368,7 +2368,8 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video generation and editing/Inpaint video" + "category": "Video generation and editing/Inpaint video", + "description": "Inpaints masked regions in video frames using Wan2.1 VACE."
} ] }, @@ -2384,4 +2385,4 @@ } }, "version": 0.4 -} +} \ No newline at end of file diff --git a/blueprints/Video Stitch.json b/blueprints/Video Stitch.json index 020896d78..6eb0f0bbf 100644 --- a/blueprints/Video Stitch.json +++ b/blueprints/Video Stitch.json @@ -584,8 +584,9 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video Tools/Stitch videos" + "category": "Video Tools/Stitch videos", + "description": "Stitches multiple video clips into a single sequential video file." } ] } -} +} \ No newline at end of file diff --git a/blueprints/Video Upscale(GAN x4).json b/blueprints/Video Upscale(GAN x4).json index b61dc88d7..472287add 100644 --- a/blueprints/Video Upscale(GAN x4).json +++ b/blueprints/Video Upscale(GAN x4).json @@ -412,9 +412,10 @@ "extra": { "workflowRendererVersion": "LG" }, - "category": "Video generation and editing/Enhance video" + "category": "Video generation and editing/Enhance video", + "description": "Upscales video to 4\u00d7 its original resolution using a GAN-based upscaling model." } ] }, "extra": {} -} +} \ No newline at end of file