From 6773269c1ddad8d582c3dd961a39d394fefed8cb Mon Sep 17 00:00:00 2001
From: linmoumou
Date: Fri, 8 May 2026 14:16:34 +0800
Subject: [PATCH] Refine blueprint descriptions with researched model specs from docs

Updates subgraph descriptions across the 47 updated blueprints with accurate model details drawn from ComfyUI docs, including:

- Flux.1 Dev: 12B open-weights, Pro-level quality
- Flux.2 Klein 4B: fastest Flux, distilled architecture
- Qwen-Image: 20B MMDiT, multilingual text rendering
- Z-Image-Turbo: distilled 6B DiT, sub-second inference
- LTX-2/2.3: 19B DiT audio-video foundation model
- Wan2.2: open-source, 14B and 5B variants
- ACE-Step 1.5: ~1s full-song generation
- GPU shader nodes consistently labeled as fragment shaders
---
 blueprints/Brightness and Contrast.json | 2 +-
 blueprints/Canny to Image (Z-Image-Turbo).json | 2 +-
 blueprints/Canny to Video (LTX 2.0).json | 2 +-
 blueprints/Chromatic Aberration.json | 4 ++--
 blueprints/Color Adjustment.json | 4 ++--
 blueprints/Color Balance.json | 2 +-
 blueprints/Color Curves.json | 2 +-
 blueprints/Crop Images 2x2.json | 2 +-
 blueprints/Crop Images 3x3.json | 4 ++--
 blueprints/Depth to Video (ltx 2.0).json | 2 +-
 blueprints/Edge-Preserving Blur.json | 4 ++--
 blueprints/Film Grain.json | 4 ++--
 blueprints/Glow.json | 2 +-
 blueprints/Hue and Saturation.json | 2 +-
 blueprints/Image Captioning (gemini).json | 2 +-
 blueprints/Image Channels.json | 2 +-
 blueprints/Image Edit (FireRed Image Edit 1.1).json | 2 +-
 blueprints/Image Edit (Flux.2 Klein 4B).json | 4 ++--
 blueprints/Image Edit (LongCat Image Edit).json | 2 +-
 blueprints/Image Edit (Qwen 2511).json | 2 +-
 blueprints/Image Inpainting (Flux.1 Fill Dev).json | 2 +-
 blueprints/Image Inpainting (Qwen-image).json | 4 ++--
 blueprints/Image Levels.json | 2 +-
 blueprints/Image Outpainting (Qwen-Image).json | 6 +++---
 blueprints/Image Upscale(Z-image-Turbo).json | 2 +-
 blueprints/Image to Layers(Qwen-Image-Layered).json | 2 +-
 blueprints/Image to Model (Hunyuan3d 2.1).json | 2 +-
 blueprints/Image to Video (LTX-2.3).json | 2 +-
 blueprints/Image to Video (Wan 2.2).json | 8 ++++----
 blueprints/Pose to Image (Z-Image-Turbo).json | 2 +-
 blueprints/Pose to Video (LTX 2.0).json | 2 +-
 blueprints/Prompt Enhance.json | 2 +-
 blueprints/Sharpen.json | 4 ++--
 blueprints/Text to Audio (ACE-Step 1.5).json | 2 +-
 blueprints/Text to Image (Flux.1 Dev).json | 2 +-
 blueprints/Text to Image (Flux.1 Krea Dev).json | 2 +-
 blueprints/Text to Image (NetaYume Lumina).json | 4 ++--
 blueprints/Text to Image (Qwen-Image 2512).json | 4 ++--
 blueprints/Text to Image (Qwen-Image).json | 2 +-
 blueprints/Text to Image (Z-Image-Turbo).json | 2 +-
 blueprints/Text to Video (LTX-2.3).json | 2 +-
 blueprints/Text to Video (Wan 2.2).json | 6 +++---
 blueprints/Unsharp Mask.json | 2 +-
 blueprints/Video Captioning (Gemini).json | 2 +-
 blueprints/Video Inpaint(Wan2.1 VACE).json | 8 ++++----
 blueprints/Video Stitch.json | 2 +-
 blueprints/Video Upscale(GAN x4).json | 2 +-
 47 files changed, 67 insertions(+), 67 deletions(-)

diff --git a/blueprints/Brightness and Contrast.json b/blueprints/Brightness and Contrast.json index 146b7966a..78fc52f29 100644 --- a/blueprints/Brightness and Contrast.json +++ b/blueprints/Brightness and Contrast.json @@ -432,7 +432,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Adjusts image brightness and contrast using real-time GPU shader processing." + "description": "Adjusts image brightness and contrast using a real-time GPU fragment shader."
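The Brightness and Contrast hunk above touches only the blueprint's description; the shader itself is not part of this diff. For reference, a minimal sketch of what such a brightness/contrast fragment shader typically looks like, reusing the `u_image0` / `u_floatN` / `v_texCoord` conventions visible in the other GLSLShader nodes in this patch (the uniform slots and parameter ranges are assumptions, not the blueprint's actual code):

```glsl
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform float u_float0; // brightness (-100 to 100), assumed range
uniform float u_float1; // contrast   (-100 to 100), assumed range

in vec2 v_texCoord;
out vec4 fragColor;

void main() {
    vec4 tex = texture(u_image0, v_texCoord);
    vec3 color = tex.rgb;

    // Scale UI range -100..100 to -1..1
    float brightness = u_float0 * 0.01;
    float contrast   = u_float1 * 0.01;

    // Brightness: uniform additive offset
    color += brightness;

    // Contrast: scale around mid-gray so a setting of 0 is a no-op
    color = (color - 0.5) * (1.0 + contrast) + 0.5;

    fragColor = vec4(clamp(color, 0.0, 1.0), tex.a);
}
```

Pivoting contrast around 0.5 keeps mid-gray fixed, which is why the two controls compose cleanly in either order.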
} ] }, diff --git a/blueprints/Canny to Image (Z-Image-Turbo).json b/blueprints/Canny to Image (Z-Image-Turbo).json index fec6c22a4..ee36fc916 100644 --- a/blueprints/Canny to Image (Z-Image-Turbo).json +++ b/blueprints/Canny to Image (Z-Image-Turbo).json @@ -1554,7 +1554,7 @@ "VHS_KeepIntermediate": true }, "category": "Image generation and editing/Canny to image", - "description": "Generates an image from a Canny edge map using Z-Image-Turbo, optimized for fast text-conditioned edge-to-image generation." + "description": "Generates an image from a Canny edge map using Z-Image-Turbo, with text conditioning and fast inference." } ] }, diff --git a/blueprints/Canny to Video (LTX 2.0).json b/blueprints/Canny to Video (LTX 2.0).json index 7c752addc..bc0143760 100644 --- a/blueprints/Canny to Video (LTX 2.0).json +++ b/blueprints/Canny to Video (LTX 2.0).json @@ -3601,7 +3601,7 @@ "workflowRendererVersion": "LG" }, "category": "Video generation and editing/Canny to video", - "description": "Generates video frames from Canny edge maps using LTX 2.0 with text conditioning." + "description": "Generates synchronised video+audio from Canny edge maps using LTX-2, Lightricks' 19B DiT audio-video foundation model." } ] }, diff --git a/blueprints/Chromatic Aberration.json b/blueprints/Chromatic Aberration.json index bcd0a7cd5..60be1e921 100644 --- a/blueprints/Chromatic Aberration.json +++ b/blueprints/Chromatic Aberration.json @@ -232,7 +232,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform int u_int0; // Mode\nuniform float u_float0; // Amount (0 to 100)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MODE_LINEAR = 0;\nconst int MODE_RADIAL = 1;\nconst int MODE_BARREL = 2;\nconst int MODE_SWIRL = 3;\nconst int MODE_DIAGONAL = 4;\n\nconst float AMOUNT_SCALE = 0.0005;\nconst float RADIAL_MULT = 4.0;\nconst float BARREL_MULT = 8.0;\nconst float INV_SQRT2 = 0.70710678118;\n\nvoid main() {\n vec2 uv = v_texCoord;\n vec4 original = texture(u_image0, uv);\n\n float amount = u_float0 * AMOUNT_SCALE;\n\n if (amount < 0.000001) {\n fragColor = original;\n return;\n }\n\n // Aspect-corrected coordinates for circular effects\n float aspect = u_resolution.x / u_resolution.y;\n vec2 centered = uv - 0.5;\n vec2 corrected = vec2(centered.x * aspect, centered.y);\n float r = length(corrected);\n vec2 dir = r > 0.0001 ? 
corrected / r : vec2(0.0);\n vec2 offset = vec2(0.0);\n\n if (u_int0 == MODE_LINEAR) {\n // Horizontal shift (no aspect correction needed)\n offset = vec2(amount, 0.0);\n }\n else if (u_int0 == MODE_RADIAL) {\n // Outward from center, stronger at edges\n offset = dir * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_BARREL) {\n // Lens distortion simulation (r² falloff)\n offset = dir * r * r * amount * BARREL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_SWIRL) {\n // Perpendicular to radial (rotational aberration)\n vec2 perp = vec2(-dir.y, dir.x);\n offset = perp * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_DIAGONAL) {\n // 45° offset (no aspect correction needed)\n offset = vec2(amount, amount) * INV_SQRT2;\n }\n \n float red = texture(u_image0, uv + offset).r;\n float green = original.g;\n float blue = texture(u_image0, uv - offset).b;\n \n fragColor = vec4(red, green, blue, original.a);\n}", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform int u_int0; // Mode\nuniform float u_float0; // Amount (0 to 100)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MODE_LINEAR = 0;\nconst int MODE_RADIAL = 1;\nconst int MODE_BARREL = 2;\nconst int MODE_SWIRL = 3;\nconst int MODE_DIAGONAL = 4;\n\nconst float AMOUNT_SCALE = 0.0005;\nconst float RADIAL_MULT = 4.0;\nconst float BARREL_MULT = 8.0;\nconst float INV_SQRT2 = 0.70710678118;\n\nvoid main() {\n vec2 uv = v_texCoord;\n vec4 original = texture(u_image0, uv);\n\n float amount = u_float0 * AMOUNT_SCALE;\n\n if (amount < 0.000001) {\n fragColor = original;\n return;\n }\n\n // Aspect-corrected coordinates for circular effects\n float aspect = u_resolution.x / u_resolution.y;\n vec2 centered = uv - 0.5;\n vec2 corrected = vec2(centered.x * aspect, centered.y);\n float r = length(corrected);\n vec2 dir = r > 0.0001 ? corrected / r : vec2(0.0);\n vec2 offset = vec2(0.0);\n\n if (u_int0 == MODE_LINEAR) {\n // Horizontal shift (no aspect correction needed)\n offset = vec2(amount, 0.0);\n }\n else if (u_int0 == MODE_RADIAL) {\n // Outward from center, stronger at edges\n offset = dir * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_BARREL) {\n // Lens distortion simulation (r\u00b2 falloff)\n offset = dir * r * r * amount * BARREL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_SWIRL) {\n // Perpendicular to radial (rotational aberration)\n vec2 perp = vec2(-dir.y, dir.x);\n offset = perp * r * amount * RADIAL_MULT;\n offset.x /= aspect; // Convert back to UV space\n }\n else if (u_int0 == MODE_DIAGONAL) {\n // 45\u00b0 offset (no aspect correction needed)\n offset = vec2(amount, amount) * INV_SQRT2;\n }\n \n float red = texture(u_image0, uv + offset).r;\n float green = original.g;\n float blue = texture(u_image0, uv - offset).b;\n \n fragColor = vec4(red, green, blue, original.a);\n}", "from_input" ] }, @@ -378,7 +378,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Adds or corrects chromatic aberration (color fringing) for creative lens effect emulation." + "description": "Adds lens-style chromatic aberration (color fringing) using a real-time GPU fragment shader." 
} ] } diff --git a/blueprints/Color Adjustment.json b/blueprints/Color Adjustment.json index 2b73c0a47..44f5e1a92 100644 --- a/blueprints/Color Adjustment.json +++ b/blueprints/Color Adjustment.json @@ -256,7 +256,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // temperature (-100 to 100)\nuniform float u_float1; // tint (-100 to 100)\nuniform float u_float2; // vibrance (-100 to 100)\nuniform float u_float3; // saturation (-100 to 100)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst float INPUT_SCALE = 0.01;\nconst float TEMP_TINT_PRIMARY = 0.3;\nconst float TEMP_TINT_SECONDARY = 0.15;\nconst float VIBRANCE_BOOST = 2.0;\nconst float SATURATION_BOOST = 2.0;\nconst float SKIN_PROTECTION = 0.5;\nconst float EPSILON = 0.001;\nconst vec3 LUMA_WEIGHTS = vec3(0.299, 0.587, 0.114);\n\nvoid main() {\n vec4 tex = texture(u_image0, v_texCoord);\n vec3 color = tex.rgb;\n \n // Scale inputs: -100/100 → -1/1\n float temperature = u_float0 * INPUT_SCALE;\n float tint = u_float1 * INPUT_SCALE;\n float vibrance = u_float2 * INPUT_SCALE;\n float saturation = u_float3 * INPUT_SCALE;\n \n // Temperature (warm/cool): positive = warm, negative = cool\n color.r += temperature * TEMP_TINT_PRIMARY;\n color.b -= temperature * TEMP_TINT_PRIMARY;\n \n // Tint (green/magenta): positive = green, negative = magenta\n color.g += tint * TEMP_TINT_PRIMARY;\n color.r -= tint * TEMP_TINT_SECONDARY;\n color.b -= tint * TEMP_TINT_SECONDARY;\n \n // Single clamp after temperature/tint\n color = clamp(color, 0.0, 1.0);\n \n // Vibrance with skin protection\n if (vibrance != 0.0) {\n float maxC = max(color.r, max(color.g, color.b));\n float minC = min(color.r, min(color.g, color.b));\n float sat = maxC - minC;\n float gray = dot(color, LUMA_WEIGHTS);\n \n if (vibrance < 0.0) {\n // Desaturate: -100 → gray\n color = mix(vec3(gray), color, 1.0 + vibrance);\n } else {\n // Boost less saturated colors more\n float vibranceAmt = vibrance * (1.0 - sat);\n \n // Branchless skin tone protection\n float isWarmTone = step(color.b, color.g) * step(color.g, color.r);\n float warmth = (color.r - color.b) / max(maxC, EPSILON);\n float skinTone = isWarmTone * warmth * sat * (1.0 - sat);\n vibranceAmt *= (1.0 - skinTone * SKIN_PROTECTION);\n \n color = mix(vec3(gray), color, 1.0 + vibranceAmt * VIBRANCE_BOOST);\n }\n }\n \n // Saturation\n if (saturation != 0.0) {\n float gray = dot(color, LUMA_WEIGHTS);\n float satMix = saturation < 0.0\n ? 
1.0 + saturation // -100 → gray\n : 1.0 + saturation * SATURATION_BOOST; // +100 → 3x boost\n color = mix(vec3(gray), color, satMix);\n }\n \n fragColor = vec4(clamp(color, 0.0, 1.0), tex.a);\n}", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // temperature (-100 to 100)\nuniform float u_float1; // tint (-100 to 100)\nuniform float u_float2; // vibrance (-100 to 100)\nuniform float u_float3; // saturation (-100 to 100)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst float INPUT_SCALE = 0.01;\nconst float TEMP_TINT_PRIMARY = 0.3;\nconst float TEMP_TINT_SECONDARY = 0.15;\nconst float VIBRANCE_BOOST = 2.0;\nconst float SATURATION_BOOST = 2.0;\nconst float SKIN_PROTECTION = 0.5;\nconst float EPSILON = 0.001;\nconst vec3 LUMA_WEIGHTS = vec3(0.299, 0.587, 0.114);\n\nvoid main() {\n vec4 tex = texture(u_image0, v_texCoord);\n vec3 color = tex.rgb;\n \n // Scale inputs: -100/100 \u2192 -1/1\n float temperature = u_float0 * INPUT_SCALE;\n float tint = u_float1 * INPUT_SCALE;\n float vibrance = u_float2 * INPUT_SCALE;\n float saturation = u_float3 * INPUT_SCALE;\n \n // Temperature (warm/cool): positive = warm, negative = cool\n color.r += temperature * TEMP_TINT_PRIMARY;\n color.b -= temperature * TEMP_TINT_PRIMARY;\n \n // Tint (green/magenta): positive = green, negative = magenta\n color.g += tint * TEMP_TINT_PRIMARY;\n color.r -= tint * TEMP_TINT_SECONDARY;\n color.b -= tint * TEMP_TINT_SECONDARY;\n \n // Single clamp after temperature/tint\n color = clamp(color, 0.0, 1.0);\n \n // Vibrance with skin protection\n if (vibrance != 0.0) {\n float maxC = max(color.r, max(color.g, color.b));\n float minC = min(color.r, min(color.g, color.b));\n float sat = maxC - minC;\n float gray = dot(color, LUMA_WEIGHTS);\n \n if (vibrance < 0.0) {\n // Desaturate: -100 \u2192 gray\n color = mix(vec3(gray), color, 1.0 + vibrance);\n } else {\n // Boost less saturated colors more\n float vibranceAmt = vibrance * (1.0 - sat);\n \n // Branchless skin tone protection\n float isWarmTone = step(color.b, color.g) * step(color.g, color.r);\n float warmth = (color.r - color.b) / max(maxC, EPSILON);\n float skinTone = isWarmTone * warmth * sat * (1.0 - sat);\n vibranceAmt *= (1.0 - skinTone * SKIN_PROTECTION);\n \n color = mix(vec3(gray), color, 1.0 + vibranceAmt * VIBRANCE_BOOST);\n }\n }\n \n // Saturation\n if (saturation != 0.0) {\n float gray = dot(color, LUMA_WEIGHTS);\n float satMix = saturation < 0.0\n ? 1.0 + saturation // -100 \u2192 gray\n : 1.0 + saturation * SATURATION_BOOST; // +100 \u2192 3x boost\n color = mix(vec3(gray), color, satMix);\n }\n \n fragColor = vec4(clamp(color, 0.0, 1.0), tex.a);\n}", "from_input" ] }, @@ -597,7 +597,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Adjusts image saturation, temperature, and tint using real-time GPU shader processing." + "description": "Adjusts saturation, temperature, and tint using a real-time GPU fragment shader." } ] } diff --git a/blueprints/Color Balance.json b/blueprints/Color Balance.json index f850cf288..d921eab37 100644 --- a/blueprints/Color Balance.json +++ b/blueprints/Color Balance.json @@ -1130,7 +1130,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Balances image colors across shadows, midtones, and highlights via real-time GPU shader." + "description": "Balances colors across shadows, midtones, and highlights using a real-time GPU fragment shader." 
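Color Balance's shader is likewise not shown in this diff. The standard approach weights per-channel shifts by smooth shadow/midtone/highlight masks derived from luminance; a minimal sketch under the same fragment-shader conventions (the `vec3` uniform names here are illustrative assumptions):

```glsl
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform vec3 u_shadows;    // per-channel shift applied to shadows (assumed name)
uniform vec3 u_midtones;   // per-channel shift applied to midtones (assumed name)
uniform vec3 u_highlights; // per-channel shift applied to highlights (assumed name)

in vec2 v_texCoord;
out vec4 fragColor;

void main() {
    vec4 tex = texture(u_image0, v_texCoord);
    float luma = dot(tex.rgb, vec3(0.299, 0.587, 0.114));

    // Smooth tonal-range weights; the three sum to 1 for any luma in [0, 1]
    float wShadows    = 1.0 - smoothstep(0.0, 0.5, luma);
    float wHighlights = smoothstep(0.5, 1.0, luma);
    float wMidtones   = 1.0 - wShadows - wHighlights;

    vec3 color = tex.rgb
        + u_shadows    * wShadows
        + u_midtones   * wMidtones
        + u_highlights * wHighlights;

    fragColor = vec4(clamp(color, 0.0, 1.0), tex.a);
}
```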
} ] } diff --git a/blueprints/Color Curves.json b/blueprints/Color Curves.json index 96584bafd..b9bfb7029 100644 --- a/blueprints/Color Curves.json +++ b/blueprints/Color Curves.json @@ -609,7 +609,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Fine-tunes image tone and color using per-channel curve adjustments via real-time GPU shader." + "description": "Fine-tunes tone and color with per-channel curve adjustments using a real-time GPU fragment shader." } ] } diff --git a/blueprints/Crop Images 2x2.json b/blueprints/Crop Images 2x2.json index 33f749fba..261de1856 100644 --- a/blueprints/Crop Images 2x2.json +++ b/blueprints/Crop Images 2x2.json @@ -1610,7 +1610,7 @@ ], "extra": {}, "category": "Image Tools/Crop", - "description": "Splits an image into a 2×2 grid of four equal tiles for batch processing or composition." + "description": "Splits an image into a 2\u00d72 grid of four equal tiles." } ] }, diff --git a/blueprints/Crop Images 3x3.json b/blueprints/Crop Images 3x3.json index 9f6978ac6..74176dc3e 100644 --- a/blueprints/Crop Images 3x3.json +++ b/blueprints/Crop Images 3x3.json @@ -368,7 +368,7 @@ ] } ], - "title": "Math Expression (Width)", + "title": "Math Expression \uff08Width\uff09", "properties": { "ue_properties": { "widget_ue_connectable": {}, @@ -2947,7 +2947,7 @@ ], "extra": {}, "category": "Image Tools/Crop", - "description": "Splits an image into a 3×3 grid of nine equal tiles for batch processing or composition." + "description": "Splits an image into a 3\u00d73 grid of nine equal tiles." } ] }, diff --git a/blueprints/Depth to Video (ltx 2.0).json b/blueprints/Depth to Video (ltx 2.0).json index af084a503..7ee8db7a5 100644 --- a/blueprints/Depth to Video (ltx 2.0).json +++ b/blueprints/Depth to Video (ltx 2.0).json @@ -4234,7 +4234,7 @@ "workflowRendererVersion": "LG" }, "category": "Video generation and editing/Depth to video", - "description": "Generates video from a depth map sequence using LTX 2.0 with text conditioning." + "description": "Generates synchronised video+audio from depth maps using LTX-2's DiT architecture." 
}, { "id": "38b60539-50a7-42f9-a5fe-bdeca26272e2", diff --git a/blueprints/Edge-Preserving Blur.json b/blueprints/Edge-Preserving Blur.json index 659581ed4..d3a61e3f2 100644 --- a/blueprints/Edge-Preserving Blur.json +++ b/blueprints/Edge-Preserving Blur.json @@ -252,7 +252,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // Blur radius (0–20, default ~5)\nuniform float u_float1; // Edge threshold (0–100, default ~30)\nuniform int u_int0; // Step size (0/1 = every pixel, 2+ = skip pixels)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MAX_RADIUS = 20;\nconst float EPSILON = 0.0001;\n\n// Perceptual luminance\nfloat getLuminance(vec3 rgb) {\n return dot(rgb, vec3(0.299, 0.587, 0.114));\n}\n\nvec4 bilateralFilter(vec2 uv, vec2 texelSize, int radius,\n float sigmaSpatial, float sigmaColor)\n{\n vec4 center = texture(u_image0, uv);\n vec3 centerRGB = center.rgb;\n\n float invSpatial2 = -0.5 / (sigmaSpatial * sigmaSpatial);\n float invColor2 = -0.5 / (sigmaColor * sigmaColor + EPSILON);\n\n vec3 sumRGB = vec3(0.0);\n float sumWeight = 0.0;\n\n int step = max(u_int0, 1);\n float radius2 = float(radius * radius);\n\n for (int dy = -MAX_RADIUS; dy <= MAX_RADIUS; dy++) {\n if (dy < -radius || dy > radius) continue;\n if (abs(dy) % step != 0) continue;\n\n for (int dx = -MAX_RADIUS; dx <= MAX_RADIUS; dx++) {\n if (dx < -radius || dx > radius) continue;\n if (abs(dx) % step != 0) continue;\n\n vec2 offset = vec2(float(dx), float(dy));\n float dist2 = dot(offset, offset);\n if (dist2 > radius2) continue;\n\n vec3 sampleRGB = texture(u_image0, uv + offset * texelSize).rgb;\n\n // Spatial Gaussian\n float spatialWeight = exp(dist2 * invSpatial2);\n\n // Perceptual color distance (weighted RGB)\n vec3 diff = sampleRGB - centerRGB;\n float colorDist = dot(diff * diff, vec3(0.299, 0.587, 0.114));\n float colorWeight = exp(colorDist * invColor2);\n\n float w = spatialWeight * colorWeight;\n sumRGB += sampleRGB * w;\n sumWeight += w;\n }\n }\n\n vec3 resultRGB = sumRGB / max(sumWeight, EPSILON);\n return vec4(resultRGB, center.a); // preserve center alpha\n}\n\nvoid main() {\n vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));\n\n float radiusF = clamp(u_float0, 0.0, float(MAX_RADIUS));\n int radius = int(radiusF + 0.5);\n\n if (radius == 0) {\n fragColor = texture(u_image0, v_texCoord);\n return;\n }\n\n // Edge threshold → color sigma\n // Squared curve for better low-end control\n float t = clamp(u_float1, 0.0, 100.0) / 100.0;\n t *= t;\n float sigmaColor = mix(0.01, 0.5, t);\n\n // Spatial sigma tied to radius\n float sigmaSpatial = max(radiusF * 0.75, 0.5);\n\n fragColor = bilateralFilter(\n v_texCoord,\n texelSize,\n radius,\n sigmaSpatial,\n sigmaColor\n );\n}", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // Blur radius (0\u201320, default ~5)\nuniform float u_float1; // Edge threshold (0\u2013100, default ~30)\nuniform int u_int0; // Step size (0/1 = every pixel, 2+ = skip pixels)\n\nin vec2 v_texCoord;\nout vec4 fragColor;\n\nconst int MAX_RADIUS = 20;\nconst float EPSILON = 0.0001;\n\n// Perceptual luminance\nfloat getLuminance(vec3 rgb) {\n return dot(rgb, vec3(0.299, 0.587, 0.114));\n}\n\nvec4 bilateralFilter(vec2 uv, vec2 texelSize, int radius,\n float sigmaSpatial, float sigmaColor)\n{\n vec4 center = texture(u_image0, uv);\n vec3 centerRGB = center.rgb;\n\n float invSpatial2 = -0.5 / (sigmaSpatial * sigmaSpatial);\n 
float invColor2 = -0.5 / (sigmaColor * sigmaColor + EPSILON);\n\n vec3 sumRGB = vec3(0.0);\n float sumWeight = 0.0;\n\n int step = max(u_int0, 1);\n float radius2 = float(radius * radius);\n\n for (int dy = -MAX_RADIUS; dy <= MAX_RADIUS; dy++) {\n if (dy < -radius || dy > radius) continue;\n if (abs(dy) % step != 0) continue;\n\n for (int dx = -MAX_RADIUS; dx <= MAX_RADIUS; dx++) {\n if (dx < -radius || dx > radius) continue;\n if (abs(dx) % step != 0) continue;\n\n vec2 offset = vec2(float(dx), float(dy));\n float dist2 = dot(offset, offset);\n if (dist2 > radius2) continue;\n\n vec3 sampleRGB = texture(u_image0, uv + offset * texelSize).rgb;\n\n // Spatial Gaussian\n float spatialWeight = exp(dist2 * invSpatial2);\n\n // Perceptual color distance (weighted RGB)\n vec3 diff = sampleRGB - centerRGB;\n float colorDist = dot(diff * diff, vec3(0.299, 0.587, 0.114));\n float colorWeight = exp(colorDist * invColor2);\n\n float w = spatialWeight * colorWeight;\n sumRGB += sampleRGB * w;\n sumWeight += w;\n }\n }\n\n vec3 resultRGB = sumRGB / max(sumWeight, EPSILON);\n return vec4(resultRGB, center.a); // preserve center alpha\n}\n\nvoid main() {\n vec2 texelSize = 1.0 / vec2(textureSize(u_image0, 0));\n\n float radiusF = clamp(u_float0, 0.0, float(MAX_RADIUS));\n int radius = int(radiusF + 0.5);\n\n if (radius == 0) {\n fragColor = texture(u_image0, v_texCoord);\n return;\n }\n\n // Edge threshold \u2192 color sigma\n // Squared curve for better low-end control\n float t = clamp(u_float1, 0.0, 100.0) / 100.0;\n t *= t;\n float sigmaColor = mix(0.01, 0.5, t);\n\n // Spatial sigma tied to radius\n float sigmaSpatial = max(radiusF * 0.75, 0.5);\n\n fragColor = bilateralFilter(\n v_texCoord,\n texelSize,\n radius,\n sigmaSpatial,\n sigmaColor\n );\n}", "from_input" ] }, @@ -451,7 +451,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Blur", - "description": "Applies edge-preserving (bilateral) blur to soften images while retaining detail." + "description": "Applies bilateral (edge-preserving) blur to soften images while retaining detail." 
} ] }, diff --git a/blueprints/Film Grain.json b/blueprints/Film Grain.json index 2bdb6a8a7..d000b713d 100644 --- a/blueprints/Film Grain.json +++ b/blueprints/Film Grain.json @@ -268,7 +268,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform float u_float0; // grain amount [0.0 – 1.0] typical: 0.2–0.8\nuniform float u_float1; // grain size [0.3 – 3.0] lower = finer grain\nuniform float u_float2; // color amount [0.0 – 1.0] 0 = monochrome, 1 = RGB grain\nuniform float u_float3; // luminance bias [0.0 – 1.0] 0 = uniform, 1 = shadows only\nuniform int u_int0; // noise mode [0 or 1] 0 = smooth, 1 = grainy\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\n// High-quality integer hash (pcg-like)\nuint pcg(uint v) {\n uint state = v * 747796405u + 2891336453u;\n uint word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;\n return (word >> 22u) ^ word;\n}\n\n// 2D -> 1D hash input\nuint hash2d(uvec2 p) {\n return pcg(p.x + pcg(p.y));\n}\n\n// Hash to float [0, 1]\nfloat hashf(uvec2 p) {\n return float(hash2d(p)) / float(0xffffffffu);\n}\n\n// Hash to float with offset (for RGB channels)\nfloat hashf(uvec2 p, uint offset) {\n return float(pcg(hash2d(p) + offset)) / float(0xffffffffu);\n}\n\n// Convert uniform [0,1] to roughly Gaussian distribution\n// Using simple approximation: average of multiple samples\nfloat toGaussian(uvec2 p) {\n float sum = hashf(p, 0u) + hashf(p, 1u) + hashf(p, 2u) + hashf(p, 3u);\n return (sum - 2.0) * 0.7; // Centered, scaled\n}\n\nfloat toGaussian(uvec2 p, uint offset) {\n float sum = hashf(p, offset) + hashf(p, offset + 1u) \n + hashf(p, offset + 2u) + hashf(p, offset + 3u);\n return (sum - 2.0) * 0.7;\n}\n\n// Smooth noise with better interpolation\nfloat smoothNoise(vec2 p) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n // Quintic interpolation (less banding than cubic)\n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui);\n float b = toGaussian(ui + uvec2(1u, 0u));\n float c = toGaussian(ui + uvec2(0u, 1u));\n float d = toGaussian(ui + uvec2(1u, 1u));\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nfloat smoothNoise(vec2 p, uint offset) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui, offset);\n float b = toGaussian(ui + uvec2(1u, 0u), offset);\n float c = toGaussian(ui + uvec2(0u, 1u), offset);\n float d = toGaussian(ui + uvec2(1u, 1u), offset);\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nvoid main() {\n vec4 color = texture(u_image0, v_texCoord);\n \n // Luminance (Rec.709)\n float luma = dot(color.rgb, vec3(0.2126, 0.7152, 0.0722));\n \n // Grain UV (resolution-independent)\n vec2 grainUV = v_texCoord * u_resolution / max(u_float1, 0.01);\n uvec2 grainPixel = uvec2(grainUV);\n \n float g;\n vec3 grainRGB;\n \n if (u_int0 == 1) {\n // Grainy mode: pure hash noise (no interpolation = no banding)\n g = toGaussian(grainPixel);\n grainRGB = vec3(\n toGaussian(grainPixel, 100u),\n toGaussian(grainPixel, 200u),\n toGaussian(grainPixel, 300u)\n );\n } else {\n // Smooth mode: interpolated with quintic curve\n g = smoothNoise(grainUV);\n grainRGB = vec3(\n smoothNoise(grainUV, 100u),\n smoothNoise(grainUV, 200u),\n smoothNoise(grainUV, 300u)\n );\n }\n \n // Luminance weighting (less grain in highlights)\n float lumWeight = mix(1.0, 1.0 - luma, 
clamp(u_float3, 0.0, 1.0));\n \n // Strength\n float strength = u_float0 * 0.15;\n \n // Color vs monochrome grain\n vec3 grainColor = mix(vec3(g), grainRGB, clamp(u_float2, 0.0, 1.0));\n \n color.rgb += grainColor * strength * lumWeight;\n fragColor0 = vec4(clamp(color.rgb, 0.0, 1.0), color.a);\n}\n", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform vec2 u_resolution;\nuniform float u_float0; // grain amount [0.0 \u2013 1.0] typical: 0.2\u20130.8\nuniform float u_float1; // grain size [0.3 \u2013 3.0] lower = finer grain\nuniform float u_float2; // color amount [0.0 \u2013 1.0] 0 = monochrome, 1 = RGB grain\nuniform float u_float3; // luminance bias [0.0 \u2013 1.0] 0 = uniform, 1 = shadows only\nuniform int u_int0; // noise mode [0 or 1] 0 = smooth, 1 = grainy\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\n// High-quality integer hash (pcg-like)\nuint pcg(uint v) {\n uint state = v * 747796405u + 2891336453u;\n uint word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;\n return (word >> 22u) ^ word;\n}\n\n// 2D -> 1D hash input\nuint hash2d(uvec2 p) {\n return pcg(p.x + pcg(p.y));\n}\n\n// Hash to float [0, 1]\nfloat hashf(uvec2 p) {\n return float(hash2d(p)) / float(0xffffffffu);\n}\n\n// Hash to float with offset (for RGB channels)\nfloat hashf(uvec2 p, uint offset) {\n return float(pcg(hash2d(p) + offset)) / float(0xffffffffu);\n}\n\n// Convert uniform [0,1] to roughly Gaussian distribution\n// Using simple approximation: average of multiple samples\nfloat toGaussian(uvec2 p) {\n float sum = hashf(p, 0u) + hashf(p, 1u) + hashf(p, 2u) + hashf(p, 3u);\n return (sum - 2.0) * 0.7; // Centered, scaled\n}\n\nfloat toGaussian(uvec2 p, uint offset) {\n float sum = hashf(p, offset) + hashf(p, offset + 1u) \n + hashf(p, offset + 2u) + hashf(p, offset + 3u);\n return (sum - 2.0) * 0.7;\n}\n\n// Smooth noise with better interpolation\nfloat smoothNoise(vec2 p) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n // Quintic interpolation (less banding than cubic)\n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui);\n float b = toGaussian(ui + uvec2(1u, 0u));\n float c = toGaussian(ui + uvec2(0u, 1u));\n float d = toGaussian(ui + uvec2(1u, 1u));\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nfloat smoothNoise(vec2 p, uint offset) {\n vec2 i = floor(p);\n vec2 f = fract(p);\n \n f = f * f * f * (f * (f * 6.0 - 15.0) + 10.0);\n \n uvec2 ui = uvec2(i);\n float a = toGaussian(ui, offset);\n float b = toGaussian(ui + uvec2(1u, 0u), offset);\n float c = toGaussian(ui + uvec2(0u, 1u), offset);\n float d = toGaussian(ui + uvec2(1u, 1u), offset);\n \n return mix(mix(a, b, f.x), mix(c, d, f.x), f.y);\n}\n\nvoid main() {\n vec4 color = texture(u_image0, v_texCoord);\n \n // Luminance (Rec.709)\n float luma = dot(color.rgb, vec3(0.2126, 0.7152, 0.0722));\n \n // Grain UV (resolution-independent)\n vec2 grainUV = v_texCoord * u_resolution / max(u_float1, 0.01);\n uvec2 grainPixel = uvec2(grainUV);\n \n float g;\n vec3 grainRGB;\n \n if (u_int0 == 1) {\n // Grainy mode: pure hash noise (no interpolation = no banding)\n g = toGaussian(grainPixel);\n grainRGB = vec3(\n toGaussian(grainPixel, 100u),\n toGaussian(grainPixel, 200u),\n toGaussian(grainPixel, 300u)\n );\n } else {\n // Smooth mode: interpolated with quintic curve\n g = smoothNoise(grainUV);\n grainRGB = vec3(\n smoothNoise(grainUV, 100u),\n smoothNoise(grainUV, 200u),\n smoothNoise(grainUV, 300u)\n );\n }\n \n // Luminance 
weighting (less grain in highlights)\n float lumWeight = mix(1.0, 1.0 - luma, clamp(u_float3, 0.0, 1.0));\n \n // Strength\n float strength = u_float0 * 0.15;\n \n // Color vs monochrome grain\n vec3 grainColor = mix(vec3(g), grainRGB, clamp(u_float2, 0.0, 1.0));\n \n color.rgb += grainColor * strength * lumWeight;\n fragColor0 = vec4(clamp(color.rgb, 0.0, 1.0), color.a);\n}\n", "from_input" ] }, @@ -581,7 +581,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Adds procedural film grain texture for a cinematic look via real-time GPU shader." + "description": "Adds procedural film grain texture for a cinematic look via GPU fragment shader." } ] } diff --git a/blueprints/Glow.json b/blueprints/Glow.json index 897bfe9e5..2bbfdee51 100644 --- a/blueprints/Glow.json +++ b/blueprints/Glow.json @@ -576,7 +576,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Adds a glow or bloom effect around bright image areas via real-time GPU shader." + "description": "Adds a glow/bloom effect around bright image areas via GPU fragment shader." } ] } diff --git a/blueprints/Hue and Saturation.json b/blueprints/Hue and Saturation.json index 6b4c96d44..cddf0154a 100644 --- a/blueprints/Hue and Saturation.json +++ b/blueprints/Hue and Saturation.json @@ -753,7 +753,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Adjusts hue, saturation, and lightness of an image via real-time GPU shader." + "description": "Adjusts hue, saturation, and lightness of an image using a real-time GPU fragment shader." } ] } diff --git a/blueprints/Image Captioning (gemini).json b/blueprints/Image Captioning (gemini).json index 84d6b7297..f515731bf 100644 --- a/blueprints/Image Captioning (gemini).json +++ b/blueprints/Image Captioning (gemini).json @@ -311,7 +311,7 @@ "workflowRendererVersion": "LG" }, "category": "Text generation/Image Captioning", - "description": "Generates descriptive captions for input images using Google Gemini multimodal LLM." + "description": "Generates descriptive captions for images using Google Gemini's multimodal LLM." } ] } diff --git a/blueprints/Image Channels.json b/blueprints/Image Channels.json index 5db99eeb8..b6fdff5be 100644 --- a/blueprints/Image Channels.json +++ b/blueprints/Image Channels.json @@ -316,7 +316,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Manipulates individual RGBA image channels for masking, compositing, and channel effects." + "description": "Manipulates individual RGBA channels for masking, compositing, and channel effects." } ] } diff --git a/blueprints/Image Edit (FireRed Image Edit 1.1).json b/blueprints/Image Edit (FireRed Image Edit 1.1).json index 5ce5ccb2f..14310353c 100644 --- a/blueprints/Image Edit (FireRed Image Edit 1.1).json +++ b/blueprints/Image Edit (FireRed Image Edit 1.1).json @@ -2139,7 +2139,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Edit image", - "description": "Edits images via text instructions using FireRed Image Edit 1.1 instruction-following model." + "description": "Edits images via text instructions using FireRed Image Edit 1.1, a diffusion-based instruction-following editing model." 
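The Hue and Saturation hunk above also changes only the description. A minimal sketch of the HSV round-trip such a shader typically performs, using Sam Hocevar's branchless RGB/HSV conversion (uniform slots and ranges are assumptions, and the lightness control is approximated with HSV value):

```glsl
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform float u_float0; // hue shift in degrees (-180 to 180), assumed
uniform float u_float1; // saturation (-100 to 100), assumed
uniform float u_float2; // lightness (-100 to 100), assumed

in vec2 v_texCoord;
out vec4 fragColor;

vec3 rgb2hsv(vec3 c) {
    vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
    vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
    vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
    float d = q.x - min(q.w, q.y);
    float e = 1.0e-10;
    return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}

vec3 hsv2rgb(vec3 c) {
    vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
    vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
    return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}

void main() {
    vec4 tex = texture(u_image0, v_texCoord);
    vec3 hsv = rgb2hsv(tex.rgb);

    hsv.x = fract(hsv.x + u_float0 / 360.0);                  // rotate hue, wrap at 1
    hsv.y = clamp(hsv.y * (1.0 + u_float1 * 0.01), 0.0, 1.0); // scale saturation
    hsv.z = clamp(hsv.z + u_float2 * 0.01, 0.0, 1.0);         // offset value

    fragColor = vec4(hsv2rgb(hsv), tex.a);
}
```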
} ] }, diff --git a/blueprints/Image Edit (Flux.2 Klein 4B).json b/blueprints/Image Edit (Flux.2 Klein 4B).json index facceeae5..dbd53701c 100644 --- a/blueprints/Image Edit (Flux.2 Klein 4B).json +++ b/blueprints/Image Edit (Flux.2 Klein 4B).json @@ -1473,7 +1473,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Edit image", - "description": "Edits images via text instructions using Flux.2 Klein 4B, a distilled 4B parameter model with efficient prompting." + "description": "Edits images via text instructions using FLUX.2 [klein] 4B, the fastest Flux model unifying T2I and editing in one compact distilled architecture." }, { "id": "6007e698-2ebd-4917-84d8-299b35d7b7ab", @@ -1823,7 +1823,7 @@ "extra": { "workflowRendererVersion": "LG" }, - "description": "Applies reference image conditioning for style/identity transfer using Flux.2 Klein 4B." + "description": "Applies reference image conditioning for style and identity transfer (Flux.2 Klein 4B)." } ] }, diff --git a/blueprints/Image Edit (LongCat Image Edit).json b/blueprints/Image Edit (LongCat Image Edit).json index 481b9de52..de1c155a2 100644 --- a/blueprints/Image Edit (LongCat Image Edit).json +++ b/blueprints/Image Edit (LongCat Image Edit).json @@ -1418,7 +1418,7 @@ ], "extra": {}, "category": "Image generation and editing/Edit image", - "description": "Edits images via text instructions using LongCat Image Edit instruction-following diffusion model." + "description": "Edits images via text instructions using LongCat Image Edit, an instruction-following image editing diffusion model." } ] }, diff --git a/blueprints/Image Edit (Qwen 2511).json b/blueprints/Image Edit (Qwen 2511).json index a1b949fae..d37fab1aa 100644 --- a/blueprints/Image Edit (Qwen 2511).json +++ b/blueprints/Image Edit (Qwen 2511).json @@ -1469,7 +1469,7 @@ "VHS_KeepIntermediate": true }, "category": "Image generation and editing/Edit image", - "description": "Edits images via text instructions using Qwen-Image (11-2025 release), Alibaba's instruction-following model." + "description": "Edits images via text instructions using Qwen-Image-Edit-2511 with improved character consistency and integrated LoRA." } ] }, diff --git a/blueprints/Image Inpainting (Flux.1 Fill Dev).json b/blueprints/Image Inpainting (Flux.1 Fill Dev).json index 1c67dab17..a30a5ce77 100644 --- a/blueprints/Image Inpainting (Flux.1 Fill Dev).json +++ b/blueprints/Image Inpainting (Flux.1 Fill Dev).json @@ -1189,7 +1189,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Inpaint image", - "description": "Inpaints masked image regions using Flux.1 Fill Dev, BFL's 12B inpainting-optimized model." + "description": "Inpaints masked image regions using Flux.1 fill [dev], BFL's 12B inpainting/outpainting model with precise prompt following." } ] }, diff --git a/blueprints/Image Inpainting (Qwen-image).json b/blueprints/Image Inpainting (Qwen-image).json index 2951c167e..a06d57e19 100644 --- a/blueprints/Image Inpainting (Qwen-image).json +++ b/blueprints/Image Inpainting (Qwen-image).json @@ -1549,7 +1549,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Inpaint image", - "description": "Inpaints masked image regions using Qwen-Image, Alibaba's multimodal generation model." + "description": "Inpaints masked regions using Qwen-Image, extending its multilingual text rendering to inpainting tasks." 
}, { "id": "56a1f603-fbd2-40ed-94ef-c9ecbd96aca8", @@ -1909,7 +1909,7 @@ "extra": { "workflowRendererVersion": "LG" }, - "description": "Expands and softens mask edges to reduce visible seams after inpainting." + "description": "Expands and softens mask edges to reduce visible seams after image processing." } ] }, diff --git a/blueprints/Image Levels.json b/blueprints/Image Levels.json index 5afb0dd47..1a1b18932 100644 --- a/blueprints/Image Levels.json +++ b/blueprints/Image Levels.json @@ -743,7 +743,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Color adjust", - "description": "Adjusts black point, white point, and gamma for precise tonal range control." + "description": "Adjusts black point, white point, and gamma for tonal range control via GPU shader." } ] }, diff --git a/blueprints/Image Outpainting (Qwen-Image).json b/blueprints/Image Outpainting (Qwen-Image).json index f8108f117..6c07227c0 100644 --- a/blueprints/Image Outpainting (Qwen-Image).json +++ b/blueprints/Image Outpainting (Qwen-Image).json @@ -1920,7 +1920,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Outpaint image", - "description": "Extends image content beyond original boundaries using Qwen-Image outpainting." + "description": "Outpaints beyond image boundaries using Qwen-Image's outpainting capabilities." }, { "id": "f93c215e-c393-460e-9534-ed2c3d8a652e", @@ -2280,7 +2280,7 @@ "extra": { "workflowRendererVersion": "LG" }, - "description": "Expands and softens mask edges to reduce visible seams after processing." + "description": "Expands and softens mask edges to reduce visible seams after image processing." }, { "id": "2a4b2cc0-db37-4302-a067-da392f38f06b", @@ -2736,7 +2736,7 @@ "extra": { "workflowRendererVersion": "LG" }, - "description": "Scales both image and mask together while preserving alignment for outpainting workflows." + "description": "Scales both image and mask together while preserving alignment for editing workflows." } ] }, diff --git a/blueprints/Image Upscale(Z-image-Turbo).json b/blueprints/Image Upscale(Z-image-Turbo).json index 9b608c25c..cd80e59a2 100644 --- a/blueprints/Image Upscale(Z-image-Turbo).json +++ b/blueprints/Image Upscale(Z-image-Turbo).json @@ -1303,7 +1303,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Enhance", - "description": "Upscales images to higher resolution using Z-Image-Turbo for detail enhancement." + "description": "Upscales images to higher resolution using Z-Image-Turbo's efficient architecture." } ] }, diff --git a/blueprints/Image to Layers(Qwen-Image-Layered).json b/blueprints/Image to Layers(Qwen-Image-Layered).json index f59f5da9d..7b44f0563 100644 --- a/blueprints/Image to Layers(Qwen-Image-Layered).json +++ b/blueprints/Image to Layers(Qwen-Image-Layered).json @@ -1587,7 +1587,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Image to layers", - "description": "Decomposes an image into separate layers (foreground/background) using Qwen-Image-Layered." + "description": "Decomposes an image into variable-resolution RGBA layers for independent editing using Qwen-Image-Layered." 
} ] }, diff --git a/blueprints/Image to Model (Hunyuan3d 2.1).json b/blueprints/Image to Model (Hunyuan3d 2.1).json index fb7caa941..a836d85d9 100644 --- a/blueprints/Image to Model (Hunyuan3d 2.1).json +++ b/blueprints/Image to Model (Hunyuan3d 2.1).json @@ -766,7 +766,7 @@ "workflowRendererVersion": "LG" }, "category": "3D/Image to 3D Model", - "description": "Generates a 3D mesh model from a single input image using Hunyuan3D 2.1." + "description": "Generates high-fidelity 3D meshes with textures from a single input image using Hunyuan3D 2.0/2.1." } ] }, diff --git a/blueprints/Image to Video (LTX-2.3).json b/blueprints/Image to Video (LTX-2.3).json index 3b6c8b3ed..7a78d6b31 100644 --- a/blueprints/Image to Video (LTX-2.3).json +++ b/blueprints/Image to Video (LTX-2.3).json @@ -4224,7 +4224,7 @@ "workflowRendererVersion": "Vue-corrected" }, "category": "Video generation and editing/Image to video", - "description": "Generates a short video from a single input image using LTX-2.3, optimized for fast latent video generation." + "description": "Generates a video from a single input image using LTX-2.3, with enhanced fine details and prompt understanding." } ] }, diff --git a/blueprints/Image to Video (Wan 2.2).json b/blueprints/Image to Video (Wan 2.2).json index 6df6ab6b4..e63f8b9fb 100644 --- a/blueprints/Image to Video (Wan 2.2).json +++ b/blueprints/Image to Video (Wan 2.2).json @@ -825,7 +825,7 @@ } }, "widgets_values": [ - "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走" + "\u8272\u8c03\u8273\u4e3d\uff0c\u8fc7\u66dd\uff0c\u9759\u6001\uff0c\u7ec6\u8282\u6a21\u7cca\u4e0d\u6e05\uff0c\u5b57\u5e55\uff0c\u98ce\u683c\uff0c\u4f5c\u54c1\uff0c\u753b\u4f5c\uff0c\u753b\u9762\uff0c\u9759\u6b62\uff0c\u6574\u4f53\u53d1\u7070\uff0c\u6700\u5dee\u8d28\u91cf\uff0c\u4f4e\u8d28\u91cf\uff0cJPEG\u538b\u7f29\u6b8b\u7559\uff0c\u4e11\u964b\u7684\uff0c\u6b8b\u7f3a\u7684\uff0c\u591a\u4f59\u7684\u624b\u6307\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u624b\u90e8\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u8138\u90e8\uff0c\u7578\u5f62\u7684\uff0c\u6bc1\u5bb9\u7684\uff0c\u5f62\u6001\u7578\u5f62\u7684\u80a2\u4f53\uff0c\u624b\u6307\u878d\u5408\uff0c\u9759\u6b62\u4e0d\u52a8\u7684\u753b\u9762\uff0c\u6742\u4e71\u7684\u80cc\u666f\uff0c\u4e09\u6761\u817f\uff0c\u80cc\u666f\u4eba\u5f88\u591a\uff0c\u5012\u7740\u8d70" ], "color": "#322", "bgcolor": "#533" @@ -1525,7 +1525,7 @@ } }, "widgets_values": [ - "## GPU:RTX4090D 24GB\n\n| Model | Size |VRAM Usage | 1st Generation | 2nd Generation |\n|---------------------|-------|-----------|---------------|-----------------|\n| fp8_scaled |640*640| 84% | ≈ 536s | ≈ 513s |\n| fp8_scaled + 4steps LoRA | 640*640 | 83% | ≈ 97s | ≈ 71s |" + "## GPU:RTX4090D 24GB\n\n| Model | Size |VRAM Usage | 1st Generation | 2nd Generation |\n|---------------------|-------|-----------|---------------|-----------------|\n| fp8_scaled |640*640| 84% | \u2248 536s | \u2248 513s |\n| fp8_scaled + 4steps LoRA | 640*640 | 83% | \u2248 97s | \u2248 71s |" ], "color": "#222", "bgcolor": "#000" @@ -1555,7 +1555,7 @@ } }, "widgets_values": [ - "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model**\n- [wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors)\n- 
[wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors)\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile save location\n\n```\nComfyUI/\n├───📂 models/\n│ ├───📂 diffusion_models/\n│ │ ├─── wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors\n│ │ └─── wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors\n│ ├───📂 loras/\n│ │ ├─── wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors\n│ │ └─── wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors\n│ ├───📂 text_encoders/\n│ │ └─── umt5_xxl_fp8_e4m3fn_scaled.safetensors \n│ └───📂 vae/\n│ └── wan_2.1_vae.safetensors\n```\n" + "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model**\n- [wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors)\n- [wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors)\n- [wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile save location\n\n```\nComfyUI/\n\u251c\u2500\u2500\u2500\ud83d\udcc2 models/\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 diffusion_models/\n\u2502 \u2502 \u251c\u2500\u2500\u2500 wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500 wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 loras/\n\u2502 \u2502 \u251c\u2500\u2500\u2500 wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500 wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 text_encoders/\n\u2502 \u2502 \u2514\u2500\u2500\u2500 umt5_xxl_fp8_e4m3fn_scaled.safetensors \n\u2502 \u2514\u2500\u2500\u2500\ud83d\udcc2 vae/\n\u2502 
\u2514\u2500\u2500 wan_2.1_vae.safetensors\n```\n" ], "color": "#222", "bgcolor": "#000" @@ -2028,7 +2028,7 @@ "workflowRendererVersion": "LG" }, "category": "Video generation and editing/Image to video", - "description": "Generates a video from a single input image using Wan 2.2, Alibaba's diffusion video model." + "description": "Generates a video from a single image and text prompt using Wan2.2, with multi-task T2V and I2V support." } ] }, diff --git a/blueprints/Pose to Image (Z-Image-Turbo).json b/blueprints/Pose to Image (Z-Image-Turbo).json index b61e770bd..f89eec686 100644 --- a/blueprints/Pose to Image (Z-Image-Turbo).json +++ b/blueprints/Pose to Image (Z-Image-Turbo).json @@ -1299,7 +1299,7 @@ "VHS_KeepIntermediate": true }, "category": "Image generation and editing/Pose to image", - "description": "Generates an image from pose keypoint reference using Z-Image-Turbo with text conditioning." + "description": "Generates an image from pose keypoints using Z-Image-Turbo with text conditioning." } ] }, diff --git a/blueprints/Pose to Video (LTX 2.0).json b/blueprints/Pose to Video (LTX 2.0).json index 437cd2938..7440ce6eb 100644 --- a/blueprints/Pose to Video (LTX 2.0).json +++ b/blueprints/Pose to Video (LTX 2.0).json @@ -3871,7 +3871,7 @@ "workflowRendererVersion": "LG" }, "category": "Video generation and editing/Pose to video", - "description": "Generates video from pose reference frames using LTX 2.0 with text conditioning." + "description": "Generates synchronised video+audio from pose reference frames using LTX-2." } ] }, diff --git a/blueprints/Prompt Enhance.json b/blueprints/Prompt Enhance.json index 5acbd5752..e260b1203 100644 --- a/blueprints/Prompt Enhance.json +++ b/blueprints/Prompt Enhance.json @@ -271,7 +271,7 @@ "workflowRendererVersion": "LG" }, "category": "Text generation/Prompt enhance", - "description": "Enhances and expands short text prompts into detailed descriptions using a text generation model." + "description": "Expands short text prompts into detailed descriptions using a text generation model for better generation quality." 
} ] }, diff --git a/blueprints/Sharpen.json b/blueprints/Sharpen.json index 7b1ee92d1..9f56f8ca6 100644 --- a/blueprints/Sharpen.json +++ b/blueprints/Sharpen.json @@ -267,7 +267,7 @@ "Node name for S&R": "GLSLShader" }, "widgets_values": [ - "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // strength [0.0 – 2.0] typical: 0.3–1.0\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));\n \n // Sample center and neighbors\n vec4 center = texture(u_image0, v_texCoord);\n vec4 top = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));\n vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0, texel.y));\n vec4 left = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));\n vec4 right = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));\n \n // Edge enhancement (Laplacian)\n vec4 edges = center * 4.0 - top - bottom - left - right;\n \n // Add edges back scaled by strength\n vec4 sharpened = center + edges * u_float0;\n \n fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);\n}", + "#version 300 es\nprecision highp float;\n\nuniform sampler2D u_image0;\nuniform float u_float0; // strength [0.0 \u2013 2.0] typical: 0.3\u20131.0\n\nin vec2 v_texCoord;\nlayout(location = 0) out vec4 fragColor0;\n\nvoid main() {\n vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));\n \n // Sample center and neighbors\n vec4 center = texture(u_image0, v_texCoord);\n vec4 top = texture(u_image0, v_texCoord + vec2( 0.0, -texel.y));\n vec4 bottom = texture(u_image0, v_texCoord + vec2( 0.0, texel.y));\n vec4 left = texture(u_image0, v_texCoord + vec2(-texel.x, 0.0));\n vec4 right = texture(u_image0, v_texCoord + vec2( texel.x, 0.0));\n \n // Edge enhancement (Laplacian)\n vec4 edges = center * 4.0 - top - bottom - left - right;\n \n // Add edges back scaled by strength\n vec4 sharpened = center + edges * u_float0;\n \n fragColor0 = vec4(clamp(sharpened.rgb, 0.0, 1.0), center.a);\n}", "from_input" ] } @@ -303,7 +303,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Sharpen", - "description": "Sharpens image details to enhance clarity and definition via GPU shader." + "description": "Sharpens image details using a GPU fragment shader for enhanced clarity." } ] } diff --git a/blueprints/Text to Audio (ACE-Step 1.5).json b/blueprints/Text to Audio (ACE-Step 1.5).json index cacce0bfa..6ac1e0bce 100644 --- a/blueprints/Text to Audio (ACE-Step 1.5).json +++ b/blueprints/Text to Audio (ACE-Step 1.5).json @@ -1503,7 +1503,7 @@ "workflowRendererVersion": "LG" }, "category": "Audio/Music generation", - "description": "Generates audio from a text prompt using ACE-Step 1.5, a diffusion-based audio generation model." + "description": "Generates commercial-quality music from text prompts using ACE-Step 1.5, with ~1s generation for a full song on RTX 5090." } ] }, diff --git a/blueprints/Text to Image (Flux.1 Dev).json b/blueprints/Text to Image (Flux.1 Dev).json index 9d7985ce4..ebbab5f34 100644 --- a/blueprints/Text to Image (Flux.1 Dev).json +++ b/blueprints/Text to Image (Flux.1 Dev).json @@ -1030,7 +1030,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Text to image", - "description": "Generates images from text prompts using Flux.1 Dev, BFL's 12B parameter open-weights diffusion model." + "description": "Generates images from text prompts using Flux.1 [dev], BFL's 12B open-weights diffusion model with Pro-level quality." 
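The Sharpen shader above adds a scaled Laplacian directly to the source pixel; the Unsharp Mask blueprint later in this patch describes the closely related blur-and-subtract formulation. A minimal single-pass sketch of that technique (the 3x3 Gaussian kernel and uniform slot are assumptions):

```glsl
#version 300 es
precision highp float;

uniform sampler2D u_image0;
uniform float u_float0; // amount (0 to 2), assumed range

in vec2 v_texCoord;
out vec4 fragColor;

void main() {
    vec2 texel = 1.0 / vec2(textureSize(u_image0, 0));
    vec4 center = texture(u_image0, v_texCoord);

    // 3x3 Gaussian blur: kernel 1-2-1 / 2-4-2 / 1-2-1, weights sum to 16
    float k[9] = float[9](1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0);
    vec3 blur = vec3(0.0);
    int i = 0;
    for (int dy = -1; dy <= 1; dy++) {
        for (int dx = -1; dx <= 1; dx++) {
            vec2 offset = vec2(float(dx), float(dy)) * texel;
            blur += texture(u_image0, v_texCoord + offset).rgb * k[i++];
        }
    }
    blur /= 16.0;

    // Unsharp mask: original + amount * (original - blurred)
    vec3 sharpened = center.rgb + u_float0 * (center.rgb - blur);

    fragColor = vec4(clamp(sharpened, 0.0, 1.0), center.a);
}
```

With a 3x3 neighborhood the blur-and-subtract form is simply a different effective kernel than the Laplacian version, which is why both blueprints sit in the same Sharpen category.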
} ] }, diff --git a/blueprints/Text to Image (Flux.1 Krea Dev).json b/blueprints/Text to Image (Flux.1 Krea Dev).json index 1a1b58853..e8a9a86c8 100644 --- a/blueprints/Text to Image (Flux.1 Krea Dev).json +++ b/blueprints/Text to Image (Flux.1 Krea Dev).json @@ -1024,7 +1024,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Text to image", - "description": "Generates images from text prompts using Flux.1 Krea Dev, Krea's fine-tuned variant of Flux Dev." + "description": "Generates images from text prompts using Flux.1 Krea Dev, a BFL \u00d7 Krea collaboration with unique aesthetic styling and natural detail." } ] }, diff --git a/blueprints/Text to Image (NetaYume Lumina).json b/blueprints/Text to Image (NetaYume Lumina).json index 085251ae4..abad72b3a 100644 --- a/blueprints/Text to Image (NetaYume Lumina).json +++ b/blueprints/Text to Image (NetaYume Lumina).json @@ -1105,7 +1105,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Text to image", - "description": "Generates anime-style images from text prompts using NetaYume Lumina, an illustration-optimized model." + "description": "Generates images from text prompts using NetaYume Lumina, a Lumina-Next variant fine-tuned for anime-style and illustration generation." }, { "id": "a07fdf06-1bda-4dac-bdbd-63ee8ebca1c9", @@ -1460,7 +1460,7 @@ "extra": { "workflowRendererVersion": "LG" }, - "description": "Encodes a negative text prompt using CLIP for classifier-free guidance in anime-style generation." + "description": "Encodes a negative text prompt via CLIP for classifier-free guidance in anime-style generation (NetaYume Lumina)." } ] }, diff --git a/blueprints/Text to Image (Qwen-Image 2512).json b/blueprints/Text to Image (Qwen-Image 2512).json index a33b3e23d..9e666de95 100644 --- a/blueprints/Text to Image (Qwen-Image 2512).json +++ b/blueprints/Text to Image (Qwen-Image 2512).json @@ -751,7 +751,7 @@ "secondTabWidth": 65 }, "widgets_values": [ - "低分辨率,低画质,肢体畸形,手指畸形,画面过饱和,蜡像感,人脸无细节,过度光滑,画面具有AI感。构图混乱。文字模糊,扭曲" + "\u4f4e\u5206\u8fa8\u7387\uff0c\u4f4e\u753b\u8d28\uff0c\u80a2\u4f53\u7578\u5f62\uff0c\u624b\u6307\u7578\u5f62\uff0c\u753b\u9762\u8fc7\u9971\u548c\uff0c\u8721\u50cf\u611f\uff0c\u4eba\u8138\u65e0\u7ec6\u8282\uff0c\u8fc7\u5ea6\u5149\u6ed1\uff0c\u753b\u9762\u5177\u6709AI\u611f\u3002\u6784\u56fe\u6df7\u4e71\u3002\u6587\u5b57\u6a21\u7cca\uff0c\u626d\u66f2" ], "color": "#322", "bgcolor": "#533" @@ -1942,7 +1942,7 @@ "workflowRendererVersion": "Vue-corrected" }, "category": "Image generation and editing/Text to image", - "description": "Generates images from text prompts using Qwen-Image (12-2025), Alibaba's latest image generation model." + "description": "Generates images from text prompts using Qwen-Image-2512, with enhanced human realism and finer natural detail over the base version." } ] }, diff --git a/blueprints/Text to Image (Qwen-Image).json b/blueprints/Text to Image (Qwen-Image).json index 6ee1dfd63..e78d5a962 100644 --- a/blueprints/Text to Image (Qwen-Image).json +++ b/blueprints/Text to Image (Qwen-Image).json @@ -1874,7 +1874,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Text to image", - "description": "Generates images from text prompts using Qwen-Image, Alibaba's multimodal text-to-image model." + "description": "Generates images from text prompts using Qwen-Image, Alibaba's 20B MMDiT model with excellent multilingual text rendering." 
} ] }, diff --git a/blueprints/Text to Image (Z-Image-Turbo).json b/blueprints/Text to Image (Z-Image-Turbo).json index f50b2cb34..4c1b4b888 100644 --- a/blueprints/Text to Image (Z-Image-Turbo).json +++ b/blueprints/Text to Image (Z-Image-Turbo).json @@ -1055,7 +1055,7 @@ "workflowRendererVersion": "LG" }, "category": "Image generation and editing/Text to image", - "description": "Generates images from text prompts using Z-Image-Turbo for fast text-to-image generation." + "description": "Generates images from text prompts using Z-Image-Turbo, Alibaba's distilled 6B DiT with sub-second inference on consumer GPUs." } ] }, diff --git a/blueprints/Text to Video (LTX-2.3).json b/blueprints/Text to Video (LTX-2.3).json index 1bee2e675..bb2168022 100644 --- a/blueprints/Text to Video (LTX-2.3).json +++ b/blueprints/Text to Video (LTX-2.3).json @@ -4287,7 +4287,7 @@ "workflowRendererVersion": "Vue-corrected" }, "category": "Video generation and editing/Text to video", - "description": "Generates video from text prompts using LTX-2.3, a lightweight transformer-based video diffusion model." + "description": "Generates video from text prompts using LTX-2.3, Lightricks' open-source DiT with improved details, portrait support, and audio-video sync." } ] }, diff --git a/blueprints/Text to Video (Wan 2.2).json b/blueprints/Text to Video (Wan 2.2).json index 2ba208c94..07c662179 100644 --- a/blueprints/Text to Video (Wan 2.2).json +++ b/blueprints/Text to Video (Wan 2.2).json @@ -1160,7 +1160,7 @@ "title": "Model Links", "properties": {}, "widgets_values": [ - "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n) | [教程](https://docs.comfy.org/zh-CN/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model** \n- [wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors)\n- [wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors)\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile save location\n\n```\nComfyUI/\n├───📂 models/\n│ ├───📂 diffusion_models/\n│ │ ├─── wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors\n│ │ └─── wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors\n│ ├───📂 loras/\n│ │ ├───wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors\n│ │ └───wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors\n│ ├───📂 text_encoders/\n│ │ └─── umt5_xxl_fp8_e4m3fn_scaled.safetensors \n│ └───📂 vae/\n│ └── wan_2.1_vae.safetensors\n```\n" + "[Tutorial](https://docs.comfy.org/tutorials/video/wan/wan2_2\n) | 
[\u6559\u7a0b](https://docs.comfy.org/zh-CN/tutorials/video/wan/wan2_2\n)\n\n**Diffusion Model** \n- [wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors)\n- [wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/diffusion_models/wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors)\n\n**LoRA**\n\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors)\n- [wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/loras/wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors)\n\n**VAE**\n- [wan_2.1_vae.safetensors](https://huggingface.co/Comfy-Org/Wan_2.2_ComfyUI_Repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors)\n\n**Text Encoder** \n- [umt5_xxl_fp8_e4m3fn_scaled.safetensors](https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors)\n\n\nFile save location\n\n```\nComfyUI/\n\u251c\u2500\u2500\u2500\ud83d\udcc2 models/\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 diffusion_models/\n\u2502 \u2502 \u251c\u2500\u2500\u2500 wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500 wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 loras/\n\u2502 \u2502 \u251c\u2500\u2500\u2500wan2.2_t2v_lightx2v_4steps_lora_v1.1_low_noise.safetensors\n\u2502 \u2502 \u2514\u2500\u2500\u2500wan2.2_t2v_lightx2v_4steps_lora_v1.1_high_noise.safetensors\n\u2502 \u251c\u2500\u2500\u2500\ud83d\udcc2 text_encoders/\n\u2502 \u2502 \u2514\u2500\u2500\u2500 umt5_xxl_fp8_e4m3fn_scaled.safetensors \n\u2502 \u2514\u2500\u2500\u2500\ud83d\udcc2 vae/\n\u2502 \u2514\u2500\u2500 wan_2.1_vae.safetensors\n```\n" ], "color": "#222", "bgcolor": "#000" @@ -1263,7 +1263,7 @@ "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ - "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走,裸露,NSFW" + "\u8272\u8c03\u8273\u4e3d\uff0c\u8fc7\u66dd\uff0c\u9759\u6001\uff0c\u7ec6\u8282\u6a21\u7cca\u4e0d\u6e05\uff0c\u5b57\u5e55\uff0c\u98ce\u683c\uff0c\u4f5c\u54c1\uff0c\u753b\u4f5c\uff0c\u753b\u9762\uff0c\u9759\u6b62\uff0c\u6574\u4f53\u53d1\u7070\uff0c\u6700\u5dee\u8d28\u91cf\uff0c\u4f4e\u8d28\u91cf\uff0cJPEG\u538b\u7f29\u6b8b\u7559\uff0c\u4e11\u964b\u7684\uff0c\u6b8b\u7f3a\u7684\uff0c\u591a\u4f59\u7684\u624b\u6307\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u624b\u90e8\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u8138\u90e8\uff0c\u7578\u5f62\u7684\uff0c\u6bc1\u5bb9\u7684\uff0c\u5f62\u6001\u7578\u5f62\u7684\u80a2\u4f53\uff0c\u624b\u6307\u878d\u5408\uff0c\u9759\u6b62\u4e0d\u52a8\u7684\u753b\u9762\uff0c\u6742\u4e71\u7684\u80cc\u666f\uff0c\u4e09\u6761\u817f\uff0c\u80cc\u666f\u4eba\u5f88\u591a\uff0c\u5012\u7740\u8d70\uff0c\u88f8\u9732\uff0cNSFW" ], "color": "#322", "bgcolor": "#533" @@ -1573,7 +1573,7 @@ "workflowRendererVersion": "LG" }, "category": "Video generation and editing/Text to video", - "description": "Generates video from text prompts using Wan 2.2, Alibaba's open-source text-to-video diffusion model." 
+ "description": "Generates video from text prompts using Wan2.2, Alibaba's open-source diffusion video model with 14B/1.3B parameter variants." } ] }, diff --git a/blueprints/Unsharp Mask.json b/blueprints/Unsharp Mask.json index a006d0311..79a4c954f 100644 --- a/blueprints/Unsharp Mask.json +++ b/blueprints/Unsharp Mask.json @@ -435,7 +435,7 @@ "workflowRendererVersion": "LG" }, "category": "Image Tools/Sharpen", - "description": "Enhances edge contrast using unsharp masking for a sharper image appearance." + "description": "Enhances edge contrast via unsharp masking for a sharper image appearance." } ] } diff --git a/blueprints/Video Captioning (Gemini).json b/blueprints/Video Captioning (Gemini).json index d6885a33f..688371483 100644 --- a/blueprints/Video Captioning (Gemini).json +++ b/blueprints/Video Captioning (Gemini).json @@ -308,7 +308,7 @@ "workflowRendererVersion": "LG" }, "category": "Text generation/Video Captioning", - "description": "Generates descriptive captions for video input using Google Gemini multimodal LLM." + "description": "Generates descriptive captions for video input using Google Gemini's multimodal LLM." } ] } diff --git a/blueprints/Video Inpaint(Wan2.1 VACE).json b/blueprints/Video Inpaint(Wan2.1 VACE).json index 0ae88d4bb..c428e4703 100644 --- a/blueprints/Video Inpaint(Wan2.1 VACE).json +++ b/blueprints/Video Inpaint(Wan2.1 VACE).json @@ -1116,7 +1116,7 @@ "title": "About Video Size", "properties": {}, "widgets_values": [ - "| Model | 480P | 720P |\n| ------------------------------------------------------------ | ---- | ---- |\n| [VACE-1.3B](https://huggingface.co/Wan-AI/Wan2.1-VACE-1.3B) | ✅ | ❌ |\n| [VACE-14B](https://huggingface.co/Wan-AI/Wan2.1-VACE-14B) | ✅ | ✅ |" + "| Model | 480P | 720P |\n| ------------------------------------------------------------ | ---- | ---- |\n| [VACE-1.3B](https://huggingface.co/Wan-AI/Wan2.1-VACE-1.3B) | \u2705 | \u274c |\n| [VACE-14B](https://huggingface.co/Wan-AI/Wan2.1-VACE-14B) | \u2705 | \u2705 |" ], "color": "#432", "bgcolor": "#000" @@ -1516,7 +1516,7 @@ "widget_ue_connectable": {} }, "widgets_values": [ - "Currently, it's difficult to perfectly draw dynamic masks for different frames using only core nodes. However, to avoid requiring users to install additional custom nodes, our templates only use core nodes. You can refer to this implementation idea to achieve video inpainting.\n\nYou can use KJNode’s Points Editor and Sam2Segmentation to create some dynamic mask functions.\n\nCustom node links:\n- [ComfyUI-KJNodes](https://github.com/kijai/ComfyUI-KJNodes)\n- [ComfyUI-segment-anything-2](https://github.com/kijai/ComfyUI-segment-anything-2)" + "Currently, it's difficult to perfectly draw dynamic masks for different frames using only core nodes. However, to avoid requiring users to install additional custom nodes, our templates only use core nodes. 
You can refer to this implementation idea to achieve video inpainting.\n\nYou can use KJNode\u2019s Points Editor and Sam2Segmentation to create some dynamic mask functions.\n\nCustom node links:\n- [ComfyUI-KJNodes](https://github.com/kijai/ComfyUI-KJNodes)\n- [ComfyUI-segment-anything-2](https://github.com/kijai/ComfyUI-segment-anything-2)" ], "color": "#432", "bgcolor": "#000" @@ -1578,7 +1578,7 @@ "widget_ue_connectable": {} }, "widgets_values": [ - "过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走,过曝," + "\u8fc7\u66dd\uff0c\u9759\u6001\uff0c\u7ec6\u8282\u6a21\u7cca\u4e0d\u6e05\uff0c\u5b57\u5e55\uff0c\u98ce\u683c\uff0c\u4f5c\u54c1\uff0c\u753b\u4f5c\uff0c\u753b\u9762\uff0c\u9759\u6b62\uff0c\u6574\u4f53\u53d1\u7070\uff0c\u6700\u5dee\u8d28\u91cf\uff0c\u4f4e\u8d28\u91cf\uff0cJPEG\u538b\u7f29\u6b8b\u7559\uff0c\u4e11\u964b\u7684\uff0c\u6b8b\u7f3a\u7684\uff0c\u591a\u4f59\u7684\u624b\u6307\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u624b\u90e8\uff0c\u753b\u5f97\u4e0d\u597d\u7684\u8138\u90e8\uff0c\u7578\u5f62\u7684\uff0c\u6bc1\u5bb9\u7684\uff0c\u5f62\u6001\u7578\u5f62\u7684\u80a2\u4f53\uff0c\u624b\u6307\u878d\u5408\uff0c\u9759\u6b62\u4e0d\u52a8\u7684\u753b\u9762\uff0c\u6742\u4e71\u7684\u80cc\u666f\uff0c\u4e09\u6761\u817f\uff0c\u80cc\u666f\u4eba\u5f88\u591a\uff0c\u5012\u7740\u8d70,\u8fc7\u66dd\uff0c" ], "color": "#223", "bgcolor": "#335" @@ -2369,7 +2369,7 @@ "workflowRendererVersion": "LG" }, "category": "Video generation and editing/Inpaint video", - "description": "Inpaints masked regions in video frames using Wan2.1 VACE video inpainting model." + "description": "Inpaints masked regions across video frames using Wan2.1 VACE, preserving frame-to-frame consistency." } ] }, diff --git a/blueprints/Video Stitch.json b/blueprints/Video Stitch.json index a7779359a..6eb0f0bbf 100644 --- a/blueprints/Video Stitch.json +++ b/blueprints/Video Stitch.json @@ -585,7 +585,7 @@ "workflowRendererVersion": "LG" }, "category": "Video Tools/Stitch videos", - "description": "Stitches multiple video clips together into a single sequential video file." + "description": "Stitches multiple video clips into a single sequential video file." } ] } diff --git a/blueprints/Video Upscale(GAN x4).json b/blueprints/Video Upscale(GAN x4).json index 1662886f1..d71292e82 100644 --- a/blueprints/Video Upscale(GAN x4).json +++ b/blueprints/Video Upscale(GAN x4).json @@ -413,7 +413,7 @@ "workflowRendererVersion": "LG" }, "category": "Video generation and editing/Enhance video", - "description": "Upscales video to 4× resolution using GAN-based real-time video upscaling." + "description": "Upscales video to 4\u00d7 resolution using a GAN-based real-time video super-resolution model." } ] },