ComfyUI/tests/inference/workflows/hunyuan_image-0.json


{
  "3": {
    "inputs": {
      "seed": 215668140279030,
      "steps": 1,
      "cfg": 3.5,
      "sampler_name": "euler",
      "scheduler": "simple",
      "denoise": 1,
      "model": [
        "13",
        0
      ],
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "latent_image": [
        "29",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "6": {
    "inputs": {
      "text": "cute anime girl with massive fennec ears and a big fluffy fox tail with long wavy blonde hair between eyes and large blue eyes blonde colored eyelashes chubby wearing oversized clothes summer uniform large black coat long blue maxi skirt muddy clothes happy sitting on the side of the road in a run down dark gritty cyberpunk city with neon and a crumbling skyscraper in the rain at night while dipping her feet in a river of water she is holding a sign that says \"ComfyUI is the best\" and another one that says \"The Future is Comfy\"",
      "clip": [
        "26",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "7": {
    "inputs": {
      "text": "low quality, bad anatomy, extra digits, missing digits, extra limbs, missing limbs",
      "clip": [
        "26",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "3",
        0
      ],
      "vae": [
        "15",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "13": {
    "inputs": {
      "unet_name": "hunyuanimage2.1_bf16.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "15": {
    "inputs": {
      "vae_name": "hunyuan_image_2.1_vae_fp16.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "26": {
    "inputs": {
      "clip_name1": "qwen_2.5_vl_7b.safetensors",
      "clip_name2": "byt5_small_glyphxl_fp16.safetensors",
      "type": "hunyuan_image",
      "device": "default"
    },
    "class_type": "DualCLIPLoader",
    "_meta": {
      "title": "DualCLIPLoader"
    }
  },
  "29": {
    "inputs": {
      "width": 2048,
      "height": 2048,
      "batch_size": 1
    },
    "class_type": "EmptyHunyuanImageLatent",
    "_meta": {
      "title": "EmptyHunyuanImageLatent"
    }
  },
  "41": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "66:61",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "67": {
    "inputs": {},
    "class_type": "GroupOffload",
    "_meta": {
      "title": "GroupOffload"
    }
  },
  "66:58": {
    "inputs": {
      "vae_name": "hunyuan_image_refiner_vae_fp16.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "66:59": {
    "inputs": {
      "pixels": [
        "8",
        0
      ],
      "vae": [
        "66:58",
        0
      ]
    },
    "class_type": "VAEEncode",
    "_meta": {
      "title": "VAE Encode"
    }
  },
  "66:61": {
    "inputs": {
      "samples": [
        "66:63",
        0
      ],
      "vae": [
        "66:58",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "66:62": {
    "inputs": {
      "noise_augmentation": 0.1,
      "positive": [
        "66:64",
        0
      ],
      "negative": [
        "66:65",
        0
      ],
      "latent": [
        "66:59",
        0
      ]
    },
    "class_type": "HunyuanRefinerLatent",
    "_meta": {
      "title": "HunyuanRefinerLatent"
    }
  },
  "66:64": {
    "inputs": {
      "text": "<|start_header_id|>system<|end_header_id|>\n\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nMake the image high quality\n<|eot_id|>",
      "clip": [
        "26",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "66:65": {
    "inputs": {
      "text": "<|start_header_id|>system<|end_header_id|>\n\nDescribe the image by detailing the color, shape, size, texture, quantity, text, spatial relationships of the objects and background:\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n\n<|eot_id|>",
      "clip": [
        "26",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "66:60": {
    "inputs": {
      "unet_name": "hunyuanimage2.1_refiner_bf16.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "66:66": {
    "inputs": {
      "model": [
        "66:60",
        0
      ]
    },
    "class_type": "GroupOffload",
    "_meta": {
      "title": "GroupOffload"
    }
  },
  "66:63": {
    "inputs": {
      "seed": 770039891896361,
      "steps": 1,
      "cfg": 1,
      "sampler_name": "euler",
      "scheduler": "simple",
      "denoise": 1,
      "model": [
        "66:66",
        0
      ],
      "positive": [
        "66:62",
        0
      ],
      "negative": [
        "66:62",
        1
      ],
      "latent_image": [
        "66:62",
        2
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  }
}
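
For reference, this API-format graph can be queued on a running ComfyUI server over HTTP. The sketch below is a minimal example, assuming a local instance at 127.0.0.1:8188 (ComfyUI's default) and the POST /prompt endpoint demonstrated in ComfyUI's script_examples/basic_api_example.py; the seed overrides shown are arbitrary values, not part of this fixture.

# Minimal sketch: queue this workflow on a local ComfyUI server.
# Assumes ComfyUI is running at 127.0.0.1:8188 and accepts the same
# {"prompt": <graph>} payload as script_examples/basic_api_example.py.
import json
from urllib import request

with open("tests/inference/workflows/hunyuan_image-0.json") as f:
    workflow = json.load(f)

# "3" is the base-pass KSampler and "66:63" the refiner-pass KSampler in
# this graph; overriding their seeds (arbitrary here) yields a fresh sample.
workflow["3"]["inputs"]["seed"] = 42
workflow["66:63"]["inputs"]["seed"] = 42

payload = json.dumps({"prompt": workflow}).encode("utf-8")
req = request.Request(
    "http://127.0.0.1:8188/prompt",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with request.urlopen(req) as resp:
    print(resp.read().decode("utf-8"))  # response includes the queued prompt_id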