ComfyUI/comfy/web/templates/txt_to_image_to_video.json

{
  "last_node_id": 23,
  "last_link_id": 40,
  "nodes": [
    {
      "id": 3,
      "type": "KSampler",
      "pos": [1843.74, 476.56],
      "size": [315, 262],
      "flags": {},
      "order": 11,
      "mode": 0,
      "inputs": [
        {
          "name": "model",
          "type": "MODEL",
          "link": 39
        },
        {
          "name": "positive",
          "type": "CONDITIONING",
          "link": 40
        },
        {
          "name": "negative",
          "type": "CONDITIONING",
          "link": 17
        },
        {
          "name": "latent_image",
          "type": "LATENT",
          "link": 18
        }
      ],
      "outputs": [
        {
          "name": "LATENT",
          "type": "LATENT",
          "links": [7],
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "KSampler"
      },
      "widgets_values": [
        237514639057514,
        "randomize",
        20,
        2.5,
        "euler",
        "karras",
        1
      ]
    },
    {
      "id": 8,
      "type": "VAEDecode",
      "pos": [2183.74, 476.56],
      "size": [210, 46],
      "flags": {},
      "order": 12,
      "mode": 0,
      "inputs": [
        {
          "name": "samples",
          "type": "LATENT",
          "link": 7
        },
        {
          "name": "vae",
          "type": "VAE",
          "link": 26
        }
      ],
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "links": [10],
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "VAEDecode"
      },
      "widgets_values": []
    },
    {
      "id": 10,
      "type": "SaveAnimatedWEBP",
      "pos": [1654, 829],
      "size": [741.67, 564.59],
      "flags": {},
      "order": 13,
      "mode": 0,
      "inputs": [
        {
          "name": "images",
          "type": "IMAGE",
          "link": 10
        }
      ],
      "outputs": [],
      "properties": {
        "Node name for S&R": "SaveAnimatedWEBP"
      },
      "widgets_values": ["ComfyUI", 10, false, 85, "default"]
    },
    {
      "id": 12,
      "type": "SVD_img2vid_Conditioning",
      "pos": [1463.74, 496.56],
      "size": [315, 218],
      "flags": {},
      "order": 9,
      "mode": 0,
      "inputs": [
        {
          "name": "clip_vision",
          "type": "CLIP_VISION",
          "link": 24
        },
        {
          "name": "init_image",
          "type": "IMAGE",
          "link": 35,
          "slot_index": 1
        },
        {
          "name": "vae",
          "type": "VAE",
          "link": 25
        }
      ],
      "outputs": [
        {
          "name": "positive",
          "type": "CONDITIONING",
          "shape": 3,
          "links": [40],
          "slot_index": 0
        },
        {
          "name": "negative",
          "type": "CONDITIONING",
          "shape": 3,
          "links": [17],
          "slot_index": 1
        },
        {
          "name": "latent",
          "type": "LATENT",
          "shape": 3,
          "links": [18],
          "slot_index": 2
        }
      ],
      "properties": {
        "Node name for S&R": "SVD_img2vid_Conditioning"
      },
      "widgets_values": [1024, 576, 25, 127, 6, 0]
    },
    {
      "id": 14,
      "type": "VideoLinearCFGGuidance",
      "pos": [1463.74, 366.56],
      "size": [315, 58],
      "flags": {},
      "order": 4,
      "mode": 0,
      "inputs": [
        {
          "name": "model",
          "type": "MODEL",
          "link": 23
        }
      ],
      "outputs": [
        {
          "name": "MODEL",
          "type": "MODEL",
          "shape": 3,
          "links": [39],
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "VideoLinearCFGGuidance"
      },
      "widgets_values": [1]
    },
    {
      "id": 15,
      "type": "ImageOnlyCheckpointLoader",
      "pos": [1050, 320],
      "size": [369.6, 98],
      "flags": {},
      "order": 0,
      "mode": 0,
      "inputs": [],
      "outputs": [
        {
          "name": "MODEL",
          "type": "MODEL",
          "shape": 3,
          "links": [23],
          "slot_index": 0
        },
        {
          "name": "CLIP_VISION",
          "type": "CLIP_VISION",
          "shape": 3,
          "links": [24],
          "slot_index": 1
        },
        {
          "name": "VAE",
          "type": "VAE",
          "shape": 3,
          "links": [25, 26],
          "slot_index": 2
        }
      ],
      "properties": {
        "Node name for S&R": "ImageOnlyCheckpointLoader"
      },
      "widgets_values": ["svd_xt.safetensors"]
    },
    {
      "id": 16,
      "type": "CheckpointLoaderSimple",
      "pos": [0, 510],
      "size": [315, 98],
      "flags": {},
      "order": 1,
      "mode": 0,
      "inputs": [],
      "outputs": [
        {
          "name": "MODEL",
          "type": "MODEL",
          "shape": 3,
          "links": [28],
          "slot_index": 0
        },
        {
          "name": "CLIP",
          "type": "CLIP",
          "shape": 3,
          "links": [29, 31],
          "slot_index": 1
        },
        {
          "name": "VAE",
          "type": "VAE",
          "shape": 3,
          "links": [34]
        }
      ],
      "properties": {
        "Node name for S&R": "CheckpointLoaderSimple"
      },
      "widgets_values": ["sd_xl_base_1.0.safetensors"]
    },
    {
      "id": 17,
      "type": "KSampler",
      "pos": [802.4, 566.4],
      "size": [315, 262],
      "flags": {},
      "order": 7,
      "mode": 0,
      "inputs": [
        {
          "name": "model",
          "type": "MODEL",
          "link": 28
        },
        {
          "name": "positive",
          "type": "CONDITIONING",
          "link": 30
        },
        {
          "name": "negative",
          "type": "CONDITIONING",
          "link": 32
        },
        {
          "name": "latent_image",
          "type": "LATENT",
          "link": 37,
          "slot_index": 3
        }
      ],
      "outputs": [
        {
          "name": "LATENT",
          "type": "LATENT",
          "shape": 3,
          "links": [33],
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "KSampler"
      },
      "widgets_values": [
        144698910769133,
        "randomize",
        15,
        8,
        "uni_pc_bh2",
        "normal",
        1
      ]
    },
    {
      "id": 18,
      "type": "CLIPTextEncode",
      "pos": [342.4, 516.4],
      "size": [390, 130],
      "flags": {},
      "order": 5,
      "mode": 0,
      "inputs": [
        {
          "name": "clip",
          "type": "CLIP",
          "link": 29
        }
      ],
      "outputs": [
        {
          "name": "CONDITIONING",
          "type": "CONDITIONING",
          "shape": 3,
          "links": [30],
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "CLIPTextEncode"
      },
      "widgets_values": [
        "photograph beautiful scenery nature mountains alps river rapids snow sky cumulus clouds"
      ]
    },
    {
      "id": 19,
      "type": "CLIPTextEncode",
      "pos": [342.4, 696.4],
      "size": [390, 130],
      "flags": {},
      "order": 6,
      "mode": 0,
      "inputs": [
        {
          "name": "clip",
          "type": "CLIP",
          "link": 31
        }
      ],
      "outputs": [
        {
          "name": "CONDITIONING",
          "type": "CONDITIONING",
          "shape": 3,
          "links": [32],
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "CLIPTextEncode"
      },
      "widgets_values": ["text, watermark"]
    },
    {
      "id": 20,
      "type": "VAEDecode",
      "pos": [1172.4, 566.4],
      "size": [210, 46],
      "flags": {},
      "order": 8,
      "mode": 0,
      "inputs": [
        {
          "name": "samples",
          "type": "LATENT",
          "link": 33
        },
        {
          "name": "vae",
          "type": "VAE",
          "link": 34,
          "slot_index": 1
        }
      ],
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "shape": 3,
          "links": [35, 36],
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "VAEDecode"
      },
      "widgets_values": []
    },
    {
      "id": 21,
      "type": "PreviewImage",
      "pos": [1152.4, 656.4],
      "size": [275.95, 246],
      "flags": {},
      "order": 10,
      "mode": 0,
      "inputs": [
        {
          "name": "images",
          "type": "IMAGE",
          "link": 36
        }
      ],
      "outputs": [],
      "properties": {
        "Node name for S&R": "PreviewImage"
      },
      "widgets_values": []
    },
    {
      "id": 22,
      "type": "EmptyLatentImage",
      "pos": [422.4, 866.4],
      "size": [310, 110],
      "flags": {},
      "order": 2,
      "mode": 0,
      "inputs": [],
      "outputs": [
        {
          "name": "LATENT",
          "type": "LATENT",
          "shape": 3,
          "links": [37]
        }
      ],
      "properties": {
        "Node name for S&R": "EmptyLatentImage"
      },
      "widgets_values": [1024, 576, 1]
    },
    {
      "id": 23,
      "type": "MarkdownNote",
      "pos": [0, 660],
      "size": [225, 60],
      "flags": {},
      "order": 3,
      "mode": 0,
      "inputs": [],
      "outputs": [],
      "properties": {},
      "widgets_values": [
        "\ud83d\udec8 [Learn more about this workflow](https://comfyanonymous.github.io/ComfyUI_examples/video/#image-to-video)"
      ],
      "color": "#432",
      "bgcolor": "#653"
    }
  ],
  "links": [
    [7, 3, 0, 8, 0, "LATENT"],
    [10, 8, 0, 10, 0, "IMAGE"],
    [17, 12, 1, 3, 2, "CONDITIONING"],
    [18, 12, 2, 3, 3, "LATENT"],
    [23, 15, 0, 14, 0, "MODEL"],
    [24, 15, 1, 12, 0, "CLIP_VISION"],
    [25, 15, 2, 12, 2, "VAE"],
    [26, 15, 2, 8, 1, "VAE"],
    [28, 16, 0, 17, 0, "MODEL"],
    [29, 16, 1, 18, 0, "CLIP"],
    [30, 18, 0, 17, 1, "CONDITIONING"],
    [31, 16, 1, 19, 0, "CLIP"],
    [32, 19, 0, 17, 2, "CONDITIONING"],
    [33, 17, 0, 20, 0, "LATENT"],
    [34, 16, 2, 20, 1, "VAE"],
    [35, 20, 0, 12, 1, "IMAGE"],
    [36, 20, 0, 21, 0, "IMAGE"],
    [37, 22, 0, 17, 3, "LATENT"],
    [39, 14, 0, 3, 0, "MODEL"],
    [40, 12, 0, 3, 1, "CONDITIONING"]
  ],
  "groups": [
    {
      "id": 1,
      "title": "Image to Video",
      "bounding": [1455, 300, 954, 478],
      "color": "#8A8",
      "font_size": 24,
      "flags": {}
    },
    {
      "id": 2,
      "title": "Text to Image",
      "bounding": [330, 435, 1106, 544],
      "color": "#3f789e",
      "font_size": 24,
      "flags": {}
    }
  ],
  "config": {},
  "extra": {
    "ds": {
      "scale": 1.13,
      "offset": [502.97, -29.59]
    }
  },
  "version": 0.4,
  "models": [
    {
      "name": "sd_xl_base_1.0.safetensors",
      "url": "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors?download=true",
      "directory": "checkpoints"
    },
    {
      "name": "svd_xt.safetensors",
      "url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/resolve/main/svd_xt.safetensors?download=true",
      "directory": "checkpoints"
    }
  ]
}