{ "last_node_id": 77, "last_link_id": 182, "nodes": [ { "id": 38, "type": "CLIPLoader", "pos": [60, 190], "size": [315, 82], "flags": {}, "order": 0, "mode": 0, "inputs": [], "outputs": [ { "name": "CLIP", "type": "CLIP", "links": [74, 75], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPLoader" }, "widgets_values": ["t5xxl_fp16.safetensors", "ltxv", "default"] }, { "id": 8, "type": "VAEDecode", "pos": [1600, 30], "size": [210, 46], "flags": {}, "order": 11, "mode": 0, "inputs": [ { "name": "samples", "type": "LATENT", "link": 171 }, { "name": "vae", "type": "VAE", "link": 87 } ], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [106], "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" }, "widgets_values": [] }, { "id": 69, "type": "LTXVConditioning", "pos": [920, 60], "size": [223.87, 78], "flags": {}, "order": 9, "mode": 0, "inputs": [ { "name": "positive", "type": "CONDITIONING", "link": 169 }, { "name": "negative", "type": "CONDITIONING", "link": 170 } ], "outputs": [ { "name": "positive", "type": "CONDITIONING", "links": [166], "slot_index": 0 }, { "name": "negative", "type": "CONDITIONING", "links": [167], "slot_index": 1 } ], "properties": { "Node name for S&R": "LTXVConditioning" }, "widgets_values": [25] }, { "id": 72, "type": "SamplerCustom", "pos": [1201, 32], "size": [355.2, 230], "flags": {}, "order": 10, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 181 }, { "name": "positive", "type": "CONDITIONING", "link": 166 }, { "name": "negative", "type": "CONDITIONING", "link": 167 }, { "name": "sampler", "type": "SAMPLER", "link": 172 }, { "name": "sigmas", "type": "SIGMAS", "link": 182 }, { "name": "latent_image", "type": "LATENT", "link": 175 } ], "outputs": [ { "name": "output", "type": "LATENT", "links": [171], "slot_index": 0 }, { "name": "denoised_output", "type": "LATENT", "links": null } ], "properties": { "Node name for S&R": "SamplerCustom" }, "widgets_values": [true, 497797676867141, "randomize", 3] }, { "id": 44, "type": "CheckpointLoaderSimple", "pos": [520, 30], "size": [315, 98], "flags": {}, "order": 1, "mode": 0, "inputs": [], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [181], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": null }, { "name": "VAE", "type": "VAE", "links": [87], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": ["ltx-video-2b-v0.9.safetensors"] }, { "id": 70, "type": "EmptyLTXVLatentVideo", "pos": [860, 240], "size": [315, 130], "flags": {}, "order": 2, "mode": 0, "inputs": [], "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [168, 175], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLTXVLatentVideo" }, "widgets_values": [768, 512, 97, 1] }, { "id": 71, "type": "LTXVScheduler", "pos": [856, 531], "size": [315, 154], "flags": {}, "order": 8, "mode": 0, "inputs": [ { "name": "latent", "type": "LATENT", "shape": 7, "link": 168 } ], "outputs": [ { "name": "SIGMAS", "type": "SIGMAS", "links": [182], "slot_index": 0 } ], "properties": { "Node name for S&R": "LTXVScheduler" }, "widgets_values": [30, 2.05, 0.95, true, 0.1] }, { "id": 6, "type": "CLIPTextEncode", "pos": [420, 190], "size": [422.85, 164.31], "flags": {}, "order": 6, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 74 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [169], "slot_index": 0 } ], "title": "CLIP Text Encode (Positive Prompt)", "properties": { "Node name for S&R": 
"CLIPTextEncode" }, "widgets_values": [ "A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage." ], "color": "#232", "bgcolor": "#353" }, { "id": 7, "type": "CLIPTextEncode", "pos": [420, 390], "size": [425.28, 180.61], "flags": {}, "order": 7, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 75 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [170], "slot_index": 0 } ], "title": "CLIP Text Encode (Negative Prompt)", "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly" ], "color": "#322", "bgcolor": "#533" }, { "id": 73, "type": "KSamplerSelect", "pos": [860, 420], "size": [315, 58], "flags": {}, "order": 3, "mode": 0, "inputs": [], "outputs": [ { "name": "SAMPLER", "type": "SAMPLER", "links": [172] } ], "properties": { "Node name for S&R": "KSamplerSelect" }, "widgets_values": ["euler"] }, { "id": 76, "type": "Note", "pos": [40, 350], "size": [360, 200], "flags": {}, "order": 4, "mode": 0, "inputs": [], "outputs": [], "properties": {}, "widgets_values": [ "This model needs long descriptive prompts, if the prompt is too short the quality will suffer greatly." ], "color": "#432", "bgcolor": "#653" }, { "id": 41, "type": "SaveAnimatedWEBP", "pos": [1830, 30], "size": [680, 610], "flags": {}, "order": 12, "mode": 0, "inputs": [ { "name": "images", "type": "IMAGE", "link": 106 } ], "outputs": [], "properties": {}, "widgets_values": ["ComfyUI", 24, false, 90, "default"] }, { "id": 77, "type": "MarkdownNote", "pos": [45, 600], "size": [225, 60], "flags": {}, "order": 5, "mode": 0, "inputs": [], "outputs": [], "properties": {}, "widgets_values": [ "\ud83d\udec8 [Learn more about this workflow](https://comfyanonymous.github.io/ComfyUI_examples/ltxv/)" ], "color": "#432", "bgcolor": "#653" } ], "links": [ [74, 38, 0, 6, 0, "CLIP"], [75, 38, 0, 7, 0, "CLIP"], [87, 44, 2, 8, 1, "VAE"], [106, 8, 0, 41, 0, "IMAGE"], [166, 69, 0, 72, 1, "CONDITIONING"], [167, 69, 1, 72, 2, "CONDITIONING"], [168, 70, 0, 71, 0, "LATENT"], [169, 6, 0, 69, 0, "CONDITIONING"], [170, 7, 0, 69, 1, "CONDITIONING"], [171, 72, 0, 8, 0, "LATENT"], [172, 73, 0, 72, 3, "SAMPLER"], [175, 70, 0, 72, 5, "LATENT"], [181, 44, 0, 72, 0, "MODEL"], [182, 71, 0, 72, 4, "SIGMAS"] ], "groups": [], "config": {}, "extra": { "ds": { "scale": 0.65, "offset": [1490.32, 926.49] } }, "version": 0.4, "models": [ { "name": "t5xxl_fp16.safetensors", "url": "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors?download=true", "directory": "text_encoders" }, { "name": "ltx-video-2b-v0.9.safetensors", "url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltx-video-2b-v0.9.safetensors?download=true", "directory": "checkpoints" } ] }