Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-11 06:40:48 +08:00)

Commit: Pass all tests. Add Qwen-Edit and other Qwen checkpoints for testing

parent 6af812f9a8
commit 6a48fc1c40
@@ -380,6 +380,19 @@ KNOWN_LORAS: Final[KnownDownloadables] = KnownDownloadables([
     HuggingFile("black-forest-labs/FLUX.1-Canny-dev-lora", "flux1-canny-dev-lora.safetensors"),
     HuggingFile("black-forest-labs/FLUX.1-Depth-dev-lora", "flux1-depth-dev-lora.safetensors"),
     HuggingFile("latent-consistency/lcm-lora-sdxl", "pytorch_lora_weights.safetensors", save_with_filename="lcm_lora_sdxl.safetensors"),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-4steps-V1.0.safetensors", show_in_ui=False),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-4steps-V1.0-bf16.safetensors", show_in_ui=False),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-4steps-V2.0.safetensors"),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors", show_in_ui=False),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-8steps-V1.0.safetensors", show_in_ui=False),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-8steps-V1.1.safetensors", show_in_ui=False),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-8steps-V1.1-bf16.safetensors", show_in_ui=False),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-8steps-V2.0.safetensors"),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Lightning-8steps-V2.0-bf16.safetensors"),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Edit-Lightning-4steps-V1.0.safetensors"),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Edit-Lightning-4steps-V1.0-bf16.safetensors", show_in_ui=False),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Edit-Lightning-8steps-V1.0.safetensors"),
+    HuggingFile("lightx2v/Qwen-Image-Lightning", "Qwen-Image-Edit-Lightning-8steps-V1.0-bf16.safetensors", show_in_ui=False),
 ], folder_name="loras")
 
 KNOWN_CONTROLNETS: Final[KnownDownloadables] = KnownDownloadables([
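Each registry entry is declarative: a Hugging Face repo id, a filename within it, and optional flags such as show_in_ui or save_with_filename. A minimal sketch of what resolving one of the new Lightning LoRAs amounts to, assuming the downloader wraps huggingface_hub (the actual HuggingFile/KnownDownloadables machinery lives elsewhere in this repo and may differ):

import os
import shutil
from huggingface_hub import hf_hub_download

# Fetch one of the entries added above into the local HF cache.
path = hf_hub_download(
    repo_id="lightx2v/Qwen-Image-Lightning",
    filename="Qwen-Image-Lightning-8steps-V2.0.safetensors",
)
# save_with_filename presumably just renames on save; sketched here as a copy.
shutil.copy(path, os.path.join("models", "loras", os.path.basename(path)))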
@@ -596,6 +609,10 @@ KNOWN_UNET_MODELS: Final[KnownDownloadables] = KnownDownloadables([
     HuggingFile("Comfy-Org/Qwen-Image_ComfyUI", "split_files/diffusion_models/qwen_image_fp8_e4m3fn.safetensors"),
     HuggingFile("Comfy-Org/Qwen-Image_ComfyUI", "non_official/diffusion_models/qwen_image_distill_full_bf16.safetensors"),
     HuggingFile("Comfy-Org/Qwen-Image_ComfyUI", "non_official/diffusion_models/qwen_image_distill_full_fp8_e4m3fn.safetensors"),
+    HuggingFile("Comfy-Org/Qwen-Image-Edit_ComfyUI", "split_files/diffusion_models/qwen_image_edit_2509_bf16.safetensors"),
+    HuggingFile("Comfy-Org/Qwen-Image-Edit_ComfyUI", "split_files/diffusion_models/qwen_image_edit_2509_fp8_e4m3fn.safetensors"),
+    HuggingFile("Comfy-Org/Qwen-Image-Edit_ComfyUI", "split_files/diffusion_models/qwen_image_edit_bf16.safetensors"),
+    HuggingFile("Comfy-Org/Qwen-Image-Edit_ComfyUI", "split_files/diffusion_models/qwen_image_edit_fp8_e4m3fn.safetensors"),
 ], folder_names=["diffusion_models", "unet"])
 
 KNOWN_CLIP_MODELS: Final[KnownDownloadables] = KnownDownloadables([
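folder_names=["diffusion_models", "unet"] registers these checkpoints under both loader folder aliases. A hedged sketch for sanity-checking that a downloaded Qwen-Edit checkpoint is visible to UNETLoader, using ComfyUI's folder_paths module (run from inside a ComfyUI checkout; the fork's folder layout is assumed to match upstream):

import folder_paths  # ComfyUI's model-folder registry

# List files registered under "diffusion_models" and pick out the Qwen-Edit
# checkpoints added in the hunk above.
for name in folder_paths.get_filename_list("diffusion_models"):
    if "qwen_image_edit" in name:
        print(name)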
tests/inference/workflows/qwen-image-0.json (new file, 141 lines)
@@ -0,0 +1,141 @@
{
  "3": {
    "inputs": {
      "seed": 1125488487853216,
      "steps": 1,
      "cfg": 1,
      "sampler_name": "res_multistep",
      "scheduler": "normal",
      "denoise": 1,
      "model": [
        "66",
        0
      ],
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "latent_image": [
        "58",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "6": {
    "inputs": {
      "text": "a man",
      "clip": [
        "38",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Positive Prompt)"
    }
  },
  "7": {
    "inputs": {
      "text": "negative prompt",
      "clip": [
        "38",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Negative Prompt)"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "3",
        0
      ],
      "vae": [
        "39",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "37": {
    "inputs": {
      "unet_name": "qwen_image_fp8_e4m3fn.safetensors",
      "weight_dtype": "fp8_e4m3fn"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "38": {
    "inputs": {
      "clip_name": "qwen_2.5_vl_7b.safetensors",
      "type": "qwen_image",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "39": {
    "inputs": {
      "vae_name": "qwen_image_vae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "58": {
    "inputs": {
      "width": 1328,
      "height": 1328,
      "batch_size": 1
    },
    "class_type": "EmptySD3LatentImage",
    "_meta": {
      "title": "EmptySD3LatentImage"
    }
  },
  "60": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "66": {
    "inputs": {
      "shift": 3.1000000000000005,
      "model": [
        "37",
        0
      ]
    },
    "class_type": "ModelSamplingAuraFlow",
    "_meta": {
      "title": "ModelSamplingAuraFlow"
    }
  }
}
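In this API-format graph, a two-element list such as ["66", 0] wires an input to output 0 of node "66", so the KSampler here samples the fp8 checkpoint through ModelSamplingAuraFlow. A hedged sketch of queueing this file against a running ComfyUI server via its /prompt endpoint (the default 127.0.0.1:8188 address is assumed):

import json
import urllib.request

# Load the API-format graph and submit it; the server replies with a
# prompt_id that can later be polled via /history.
with open("tests/inference/workflows/qwen-image-0.json") as f:
    graph = json.load(f)

req = urllib.request.Request(
    "http://127.0.0.1:8188/prompt",
    data=json.dumps({"prompt": graph}).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())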
tests/inference/workflows/qwen-image-1.json (new file, 153 lines)
@@ -0,0 +1,153 @@
{
  "3": {
    "inputs": {
      "seed": 1125488487853216,
      "steps": 1,
      "cfg": 1,
      "sampler_name": "res_multistep",
      "scheduler": "normal",
      "denoise": 1,
      "model": [
        "66",
        0
      ],
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "latent_image": [
        "58",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "6": {
    "inputs": {
      "text": "a man",
      "clip": [
        "38",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Positive Prompt)"
    }
  },
  "7": {
    "inputs": {
      "text": "negative prompt",
      "clip": [
        "38",
        0
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Negative Prompt)"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "3",
        0
      ],
      "vae": [
        "39",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "37": {
    "inputs": {
      "unet_name": "qwen_image_bf16.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "38": {
    "inputs": {
      "clip_name": "qwen_2.5_vl_7b.safetensors",
      "type": "qwen_image",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "39": {
    "inputs": {
      "vae_name": "qwen_image_vae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "58": {
    "inputs": {
      "width": 1328,
      "height": 1328,
      "batch_size": 1
    },
    "class_type": "EmptySD3LatentImage",
    "_meta": {
      "title": "EmptySD3LatentImage"
    }
  },
  "60": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "66": {
    "inputs": {
      "shift": 3.1000000000000005,
      "model": [
        "75",
        0
      ]
    },
    "class_type": "ModelSamplingAuraFlow",
    "_meta": {
      "title": "ModelSamplingAuraFlow"
    }
  },
  "75": {
    "inputs": {
      "model": [
        "37",
        0
      ]
    },
    "class_type": "GroupOffload",
    "_meta": {
      "title": "GroupOffload"
    }
  }
}
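qwen-image-1.json is the same graph with two changes: node 37 loads the bf16 checkpoint instead of fp8, and a GroupOffload node (75) is spliced between the UNETLoader (37) and ModelSamplingAuraFlow (66). A small stdlib-only sketch that prints exactly which nodes differ between the two files (paths assumed relative to the repo root):

import json

with open("tests/inference/workflows/qwen-image-0.json") as f:
    a = json.load(f)
with open("tests/inference/workflows/qwen-image-1.json") as f:
    b = json.load(f)

# Expect: 37 (checkpoint/dtype), 66 (rewired input), 75 (new GroupOffload).
for node_id in sorted(set(a) | set(b), key=int):
    if a.get(node_id) != b.get(node_id):
        print(node_id, b.get(node_id, a.get(node_id))["class_type"])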
tests/inference/workflows/qwen-image-edit-0.json (new file, 216 lines)
@@ -0,0 +1,216 @@
{
  "3": {
    "inputs": {
      "seed": 976480016588017,
      "steps": 4,
      "cfg": 1,
      "sampler_name": "res_multistep",
      "scheduler": "normal",
      "denoise": 1,
      "model": [
        "75",
        0
      ],
      "positive": [
        "76",
        0
      ],
      "negative": [
        "77",
        0
      ],
      "latent_image": [
        "88",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "3",
        0
      ],
      "vae": [
        "39",
        0
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "37": {
    "inputs": {
      "unet_name": "qwen_image_edit_fp8_e4m3fn.safetensors",
      "weight_dtype": "default"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "Load Diffusion Model"
    }
  },
  "38": {
    "inputs": {
      "clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
      "type": "qwen_image",
      "device": "default"
    },
    "class_type": "CLIPLoader",
    "_meta": {
      "title": "Load CLIP"
    }
  },
  "39": {
    "inputs": {
      "vae_name": "qwen_image_vae.safetensors"
    },
    "class_type": "VAELoader",
    "_meta": {
      "title": "Load VAE"
    }
  },
  "60": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "66": {
    "inputs": {
      "shift": 3,
      "model": [
        "89",
        0
      ]
    },
    "class_type": "ModelSamplingAuraFlow",
    "_meta": {
      "title": "ModelSamplingAuraFlow"
    }
  },
  "75": {
    "inputs": {
      "strength": 1,
      "model": [
        "66",
        0
      ]
    },
    "class_type": "CFGNorm",
    "_meta": {
      "title": "CFGNorm"
    }
  },
  "76": {
    "inputs": {
      "prompt": "Remove all UI text elements from the image. Keep the feeling that the characters and scene are in water. Also, remove the green UI elements at the bottom.",
      "clip": [
        "38",
        0
      ],
      "vae": [
        "39",
        0
      ],
      "image": [
        "93",
        0
      ]
    },
    "class_type": "TextEncodeQwenImageEdit",
    "_meta": {
      "title": "TextEncodeQwenImageEdit"
    }
  },
  "77": {
    "inputs": {
      "prompt": "",
      "clip": [
        "38",
        0
      ],
      "vae": [
        "39",
        0
      ],
      "image": [
        "93",
        0
      ]
    },
    "class_type": "TextEncodeQwenImageEdit",
    "_meta": {
      "title": "TextEncodeQwenImageEdit"
    }
  },
  "88": {
    "inputs": {
      "pixels": [
        "93",
        0
      ],
      "vae": [
        "39",
        0
      ]
    },
    "class_type": "VAEEncode",
    "_meta": {
      "title": "VAE Encode"
    }
  },
  "89": {
    "inputs": {
      "lora_name": "Qwen-Image-Edit-Lightning-4steps-V1.0-bf16.safetensors",
      "strength_model": 1,
      "model": [
        "37",
        0
      ]
    },
    "class_type": "LoraLoaderModelOnly",
    "_meta": {
      "title": "LoraLoaderModelOnly"
    }
  },
  "93": {
    "inputs": {
      "upscale_method": "lanczos",
      "megapixels": 1,
      "image": [
        "101",
        0
      ]
    },
    "class_type": "ImageScaleToTotalPixels",
    "_meta": {
      "title": "Scale Image to Total Pixels"
    }
  },
  "101": {
    "inputs": {
      "value": "https://comfyanonymous.github.io/ComfyUI_examples/chroma/fennec_girl_sing.png",
      "name": "",
      "title": "",
      "description": "",
      "__required": true
    },
    "class_type": "ImageRequestParameter",
    "_meta": {
      "title": "ImageRequestParameter"
    }
  }
}
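The edit workflow pulls its input image from a URL-valued ImageRequestParameter (101), scales it to one megapixel (93), conditions both positive and negative branches on it via TextEncodeQwenImageEdit (76, 77), and keeps sampling to 4 steps with the Qwen-Image-Edit Lightning LoRA (89) plus CFGNorm (75). A hedged sketch of how a test sweep over these workflow files might be structured; the actual runner under tests/inference may differ:

import json
import pathlib

import pytest

WORKFLOWS = sorted(pathlib.Path("tests/inference/workflows").glob("qwen-image*.json"))

@pytest.mark.parametrize("workflow_path", WORKFLOWS, ids=lambda p: p.stem)
def test_links_resolve(workflow_path):
    graph = json.loads(workflow_path.read_text())
    # Every ["node_id", output_index] link must point at a node in the graph.
    for node in graph.values():
        for value in node["inputs"].values():
            if isinstance(value, list) and len(value) == 2:
                assert str(value[0]) in graph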