{
  "97": {
    "inputs": {"image": "__INPUT_IMAGE__"},
    "class_type": "LoadImage",
    "_meta": {"title": "Start Frame Image"}
  },
  "108": {
    "inputs": {
      "filename_prefix": "video/Wan2.2_image_to_video",
      "format": "auto",
      "codec": "auto",
      "video-preview": "",
      "video": ["130:117", 0]
    },
    "class_type": "SaveVideo",
    "_meta": {"title": "Save Video"}
  },
  "130:105": {
    "inputs": {
      "clip_name": "umt5_xxl_fp8_e4m3fn_scaled.safetensors",
      "type": "wan",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {"title": "Load CLIP"}
  },
  "130:106": {
    "inputs": {"vae_name": "wan_2.1_vae.safetensors"},
    "class_type": "VAELoader",
    "_meta": {"title": "Load VAE"}
  },
  "130:107": {
    "inputs": {
      "text": "A felt-style little eagle cashier greeting, waving, and smiling at the camera.",
      "clip": ["130:105", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Positive Prompt)"}
  },
  "130:109": {
    "inputs": {"shift": 5.000000000000001, "model": ["130:126", 0]},
    "class_type": "ModelSamplingSD3",
    "_meta": {"title": "ModelSamplingSD3"}
  },
  "130:110": {
    "inputs": {
      "add_noise": "enable",
      "noise_seed": 636787045983965,
      "steps": 4,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "simple",
      "start_at_step": 0,
      "end_at_step": 2,
      "return_with_leftover_noise": "enable",
      "model": ["130:109", 0],
      "positive": ["130:128", 0],
      "negative": ["130:128", 1],
      "latent_image": ["130:128", 2]
    },
    "class_type": "KSamplerAdvanced",
    "_meta": {"title": "KSampler (Advanced)"}
  },
  "130:111": {
    "inputs": {
      "add_noise": "disable",
      "noise_seed": 0,
      "steps": 4,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "simple",
      "start_at_step": 2,
      "end_at_step": 4,
      "return_with_leftover_noise": "disable",
      "model": ["130:124", 0],
      "positive": ["130:128", 0],
      "negative": ["130:128", 1],
      "latent_image": ["130:110", 0]
    },
    "class_type": "KSamplerAdvanced",
    "_meta": {"title": "KSampler (Advanced)"}
  },
  "130:117": {
    "inputs": {"fps": 16, "images": ["130:129", 0]},
    "class_type": "CreateVideo",
    "_meta": {"title": "Create Video"}
  },
  "130:122": {
    "inputs": {
      "unet_name": "wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {"title": "Load Diffusion Model"}
  },
  "130:123": {
    "inputs": {
      "unet_name": "wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {"title": "Load Diffusion Model"}
  },
  "130:124": {
    "inputs": {"shift": 5.000000000000001, "model": ["130:127", 0]},
    "class_type": "ModelSamplingSD3",
    "_meta": {"title": "ModelSamplingSD3"}
  },
  "130:125": {
    "inputs": {
      "text": "色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走",
      "clip": ["130:105", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Negative Prompt)"}
  },
  "130:126": {
    "inputs": {
      "lora_name": "wan2.2_i2v_lightx2v_4steps_lora_v1_high_noise.safetensors",
      "strength_model": 1.0000000000000002,
      "model": ["130:122", 0]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {"title": "Load LoRA"}
  },
  "130:127": {
    "inputs": {
      "lora_name": "wan2.2_i2v_lightx2v_4steps_lora_v1_low_noise.safetensors",
      "strength_model": 1.0000000000000002,
      "model": ["130:123", 0]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {"title": "Load LoRA"}
  },
  "130:128": {
    "inputs": {
      "width": 720,
      "height": 720,
      "length": 81,
      "batch_size": 1,
      "positive": ["130:107", 0],
      "negative": ["130:125", 0],
      "vae": ["130:106", 0],
      "start_image": ["97", 0]
    },
    "class_type": "WanImageToVideo",
    "_meta": {"title": "WanImageToVideo"}
  },
  "130:129": {
    "inputs": {"samples": ["130:111", 0], "vae": ["130:106", 0]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  }
}