adding tests

doctorpangloss 2025-06-17 14:26:47 -07:00
parent 8f836ad2ee
commit 7d1f840636
9 changed files with 703 additions and 4 deletions

View File

@@ -280,7 +280,7 @@ async def _start_comfyui(from_script_dir: Optional[Path] = None):
         await run(server, address=first_listen_addr, port=args.port, verbose=not args.dont_print_server,
                   call_on_start=call_on_start)
     except (asyncio.CancelledError, KeyboardInterrupt):
-        logger.debug("\nStopped server")
+        logger.debug("Stopped server")
     finally:
         if distributed:
             await q.close()

View File

@@ -40,6 +40,17 @@ from ..tracing_compatibility import ProgressSpanSampler
 from ..tracing_compatibility import patch_spanbuilder_set_channel
 from ..vendor.aiohttp_server_instrumentation import AioHttpServerInstrumentor
+# Manually replicate what the _init_dll_path method does so that the system path is searched for FFMPEG.
+# Calling torchaudio._extension.utils._init_dll_path directly does not work because it appears to initialize the torchaudio module prematurely.
+# See: https://github.com/pytorch/audio/issues/3789
+if sys.platform == "win32":
+    for path in os.environ.get("PATH", "").split(os.pathsep):
+        if os.path.exists(path):
+            try:
+                os.add_dll_directory(path)
+            except Exception:
+                pass
 this_logger = logging.getLogger(__name__)
 options.enable_args_parsing()
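
A minimal sketch of how this workaround could be exercised in a test, assuming pytest and an installed torchaudio; the test name and assertion are illustrative and not taken from this commit:

import os
import sys

import pytest


@pytest.mark.skipif(sys.platform != "win32", reason="DLL search paths only matter on Windows")
def test_ffmpeg_dll_directories_registered():
    # Mirror the workaround above: register every existing PATH entry before torchaudio loads.
    for path in os.environ.get("PATH", "").split(os.pathsep):
        if os.path.exists(path):
            try:
                os.add_dll_directory(path)
            except OSError:
                pass
    # If the FFMPEG DLLs are reachable, importing torchaudio should not raise.
    import torchaudio
    assert torchaudio.__version__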

View File

@@ -307,7 +307,6 @@ KNOWN_CHECKPOINTS: Final[KnownDownloadables] = KnownDownloadables([
     HuggingFile("Comfy-Org/stable-diffusion-3.5-fp8", "sd3.5_medium_incl_clips_t5xxlfp8scaled.safetensors"),
     HuggingFile("fal/AuraFlow-v0.2", "aura_flow_0.2.safetensors"),
     HuggingFile("lodestones/Chroma", "Chroma_v1.0.safetensors"),
-    HuggingFile("lodestones/Chroma", "chroma-unlocked-v37.safetensors"),
     HuggingFile("Comfy-Org/mochi_preview_repackaged", "all_in_one/mochi_preview_fp8_scaled.safetensors"),
     HuggingFile("Lightricks/LTX-Video", "ltx-video-2b-v0.9.5.safetensors"),
     HuggingFile("Comfy-Org/ACE-Step_ComfyUI_repackaged", "all_in_one/ace_step_v1_3.5b.safetensors"),
@@ -546,6 +545,7 @@ KNOWN_UNET_MODELS: Final[KnownDownloadables] = KnownDownloadables([
     HuggingFile("Comfy-Org/Cosmos_Predict2_repackaged", "cosmos_predict2_2B_video2world_480p_16fps.safetensors"),
     HuggingFile("Comfy-Org/Wan_2.1_ComfyUI_repackaged", "split_files/diffusion_models/wan2.1_vace_14B_fp16.safetensors"),
     HuggingFile("Comfy-Org/Wan_2.1_ComfyUI_repackaged", "split_files/diffusion_models/wan2.1_fun_camera_v1.1_1.3B_bf16.safetensors"),
+    HuggingFile("lodestones/Chroma", "chroma-unlocked-v37.safetensors"),
 ], folder_names=["diffusion_models", "unet"])
 KNOWN_CLIP_MODELS: Final[KnownDownloadables] = KnownDownloadables([

View File

@@ -67,9 +67,9 @@ if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in
     torch.serialization.add_safe_globals([ModelCheckpoint, scalar, dtype, Float64DType, encode])
     ALWAYS_SAFE_LOAD = True
-    logging.debug("Checkpoint files will always be loaded safely.")
+    logger.debug("Checkpoint files will always be loaded safely.")
 else:
-    logging.debug("Warning, you are using an old pytorch version and some ckpt/pt files might be loaded unsafely. Upgrading to 2.4 or above is recommended.")
+    logger.debug("Warning, you are using an old pytorch version and some ckpt/pt files might be loaded unsafely. Upgrading to 2.4 or above is recommended.")
 # deprecate PROGRESS_BAR_ENABLED

View File

@@ -90,6 +90,10 @@ WAN_VIDEO_14B_EXTENDED_RESOLUTIONS = [
     (544, 704)
 ]
+HIDREAM_1_EDIT_RESOLUTIONS = [
+    (768, 768),
+]
 RESOLUTION_MAP = {
     "SDXL/SD3/Flux": SDXL_SD3_FLUX_RESOLUTIONS,
     "SD1.5": SD_RESOLUTIONS,
@@ -100,6 +104,8 @@ RESOLUTION_MAP = {
     "WAN 14b": WAN_VIDEO_14B_RESOLUTIONS,
     "WAN 1.3b": WAN_VIDEO_1_3B_RESOLUTIONS,
     "WAN 14b with extras": WAN_VIDEO_14B_EXTENDED_RESOLUTIONS,
+    "HiDream 1 Edit": HIDREAM_1_EDIT_RESOLUTIONS,
     "Unknown": []
 }
 RESOLUTION_NAMES = list(RESOLUTION_MAP.keys())
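
For reference, the new key resolves to a single 768x768 entry; a small usage sketch, assuming RESOLUTION_MAP is imported from the module shown above (its path is not visible in this diff):

# "HiDream 1 Edit" currently exposes one size, matching the 768x768 target
# used by the HiDream E1 edit workflow added later in this commit.
width, height = RESOLUTION_MAP["HiDream 1 Edit"][0]
assert (width, height) == (768, 768)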

View File

@@ -0,0 +1,142 @@
{
"3": {
"inputs": {
"seed": 134898694445843,
"steps": 1,
"cfg": 4,
"sampler_name": "euler",
"scheduler": "simple",
"denoise": 1,
"model": [
"13",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"12",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"6": {
"inputs": {
"text": "aesthetic 2, anime rpg game style, cute anime girl with gigantic fennec ears and a big fluffy fox tail with long wavy blonde hair and large blue eyes blonde colored eyelashes wearing a pink sweater a large oversized gold trimmed black winter coat and a long blue maxi skirt and large winter boots and a red scarf and large gloves dirty clothes muddy clothes, she is happy and holding a sword in a winter forest with evergreen trees there are the beautiful snow mountains in the background",
"clip": [
"11",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "low quality, bad anatomy, extra digits, missing digits, extra limbs, missing limbs",
"clip": [
"11",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"15",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"10": {
"inputs": {
"clip_name": "t5xxl_fp16.safetensors",
"type": "chroma",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"11": {
"inputs": {
"min_padding": 0,
"min_length": 3,
"clip": [
"10",
0
]
},
"class_type": "T5TokenizerOptions",
"_meta": {
"title": "T5TokenizerOptions"
}
},
"12": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "EmptySD3LatentImage"
}
},
"13": {
"inputs": {
"unet_name": "chroma-unlocked-v37.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"15": {
"inputs": {
"vae_name": "ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
}
}
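
This workflow, like the three that follow, is an API-format prompt graph. A minimal sketch of how a test might queue it against a running server, assuming the standard ComfyUI /prompt endpoint on 127.0.0.1:8188 and a hypothetical path for the saved JSON; response handling is illustrative only:

import json
import urllib.request


def queue_workflow(path: str, host: str = "http://127.0.0.1:8188") -> dict:
    # Load an API-format graph (such as the Chroma workflow above) and submit it.
    with open(path, "r", encoding="utf-8") as f:
        workflow = json.load(f)
    body = json.dumps({"prompt": workflow}).encode("utf-8")
    req = urllib.request.Request(f"{host}/prompt", data=body,
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)  # typically includes a "prompt_id"


# queue_workflow("tests/workflows/chroma_text_to_image.json")  # hypothetical path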

View File

@@ -0,0 +1,170 @@
{
"3": {
"inputs": {
"seed": 788533681999051,
"steps": 1,
"cfg": 4,
"sampler_name": "euler",
"scheduler": "simple",
"denoise": 1,
"model": [
"13",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"28",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"6": {
"inputs": {
"text": "camera moving quickly through the scene timelapse wind",
"clip": [
"10",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "",
"clip": [
"10",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"15",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"10": {
"inputs": {
"clip_name": "oldt5_xxl_fp8_e4m3fn_scaled.safetensors",
"type": "cosmos",
"device": "default"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"13": {
"inputs": {
"unet_name": "cosmos_predict2_2B_video2world_480p_16fps.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"15": {
"inputs": {
"vae_name": "wan_2.1_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"28": {
"inputs": {
"width": 848,
"height": 480,
"length": 1,
"batch_size": 1,
"vae": [
"15",
0
],
"start_image": [
"33",
0
]
},
"class_type": "CosmosPredict2ImageToVideoLatent",
"_meta": {
"title": "CosmosPredict2ImageToVideoLatent"
}
},
"30": {
"inputs": {
"filename_prefix": "ComfyUI",
"fps": 16.000000000000004,
"lossless": false,
"quality": 85,
"method": "default",
"images": [
"8",
0
]
},
"class_type": "SaveAnimatedWEBP",
"_meta": {
"title": "SaveAnimatedWEBP"
}
},
"32": {
"inputs": {
"value": "https://upload.wikimedia.org/wikipedia/commons/1/1e/Max_Cornelius_-_Rocky_Mountains.jpg",
"name": "",
"title": "",
"description": "",
"__required": true
},
"class_type": "ImageRequestParameter",
"_meta": {
"title": "ImageRequestParameter"
}
},
"33": {
"inputs": {
"upscale_method": "bilinear",
"width": 848,
"height": 480,
"crop": "center",
"image": [
"32",
0
]
},
"class_type": "ImageScale",
"_meta": {
"title": "Upscale Image"
}
}
}

View File

@@ -0,0 +1,142 @@
{
"3": {
"inputs": {
"seed": 143596420866840,
"steps": 1,
"cfg": 1,
"sampler_name": "lcm",
"scheduler": "simple",
"denoise": 1,
"model": [
"70",
0
],
"positive": [
"16",
0
],
"negative": [
"40",
0
],
"latent_image": [
"53",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"55",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"16": {
"inputs": {
"text": "anime girl with massive fennec ears and a big fluffy fox tail with long wavy blonde hair and blue eyes wearing a pink sweater a large oversized black winter coat and a long blue maxi skirt and large winter boots and a red scarf and large gloves sitting in a sled sledding fast down a snow mountain",
"clip": [
"54",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Positive Prompt"
}
},
"40": {
"inputs": {
"text": "bad ugly jpeg artifacts",
"clip": [
"54",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Negative Prompt"
}
},
"53": {
"inputs": {
"width": 1024,
"height": 1024,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "EmptySD3LatentImage"
}
},
"54": {
"inputs": {
"clip_name1": "clip_l_hidream.safetensors",
"clip_name2": "clip_g_hidream.safetensors",
"clip_name3": "t5xxl_fp8_e4m3fn_scaled.safetensors",
"clip_name4": "llama_3.1_8b_instruct_fp8_scaled.safetensors"
},
"class_type": "QuadrupleCLIPLoader",
"_meta": {
"title": "QuadrupleCLIPLoader"
}
},
"55": {
"inputs": {
"vae_name": "ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"69": {
"inputs": {
"unet_name": "hidream_i1_dev_bf16.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"70": {
"inputs": {
"shift": 6.000000000000001,
"model": [
"69",
0
]
},
"class_type": "ModelSamplingSD3",
"_meta": {
"title": "ModelSamplingSD3"
}
}
}

View File

@@ -0,0 +1,228 @@
{
"6": {
"inputs": {
"text": "snow at sunset oil painting",
"clip": [
"42",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Positive"
}
},
"7": {
"inputs": {
"text": "",
"clip": [
"42",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Negative"
}
},
"8": {
"inputs": {
"samples": [
"37:8",
0
],
"vae": [
"43",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"41": {
"inputs": {
"unet_name": "hidream_e1_full_bf16.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"42": {
"inputs": {
"clip_name1": "clip_g_hidream.safetensors",
"clip_name2": "clip_l_hidream.safetensors",
"clip_name3": "t5xxl_fp8_e4m3fn_scaled.safetensors",
"clip_name4": "llama_3.1_8b_instruct_fp8_scaled.safetensors"
},
"class_type": "QuadrupleCLIPLoader",
"_meta": {
"title": "QuadrupleCLIPLoader"
}
},
"43": {
"inputs": {
"vae_name": "ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"44": {
"inputs": {
"upscale_method": "bilinear",
"width": 768,
"height": 768,
"crop": "center",
"image": [
"45",
0
]
},
"class_type": "ImageScale",
"_meta": {
"title": "Upscale Image"
}
},
"45": {
"inputs": {
"value": "https://upload.wikimedia.org/wikipedia/commons/1/1e/Max_Cornelius_-_Rocky_Mountains.jpg",
"name": "",
"title": "",
"description": "",
"__required": true
},
"class_type": "ImageRequestParameter",
"_meta": {
"title": "ImageRequestParameter"
}
},
"37:0": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "KSamplerSelect",
"_meta": {
"title": "KSamplerSelect"
}
},
"37:1": {
"inputs": {
"noise_seed": 208742294852617
},
"class_type": "RandomNoise",
"_meta": {
"title": "RandomNoise"
}
},
"37:3": {
"inputs": {
"scheduler": "normal",
"steps": 1,
"denoise": 1,
"model": [
"41",
0
]
},
"class_type": "BasicScheduler",
"_meta": {
"title": "BasicScheduler"
}
},
"37:6": {
"inputs": {
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"vae": [
"43",
0
],
"pixels": [
"44",
0
]
},
"class_type": "InstructPixToPixConditioning",
"_meta": {
"title": "InstructPixToPixConditioning"
}
},
"37:7": {
"inputs": {
"cfg_conds": 5,
"cfg_cond2_negative": 2,
"model": [
"41",
0
],
"cond1": [
"37:6",
0
],
"cond2": [
"37:6",
1
],
"negative": [
"7",
0
]
},
"class_type": "DualCFGGuider",
"_meta": {
"title": "DualCFGGuider"
}
},
"37:8": {
"inputs": {
"noise": [
"37:1",
0
],
"guider": [
"37:7",
0
],
"sampler": [
"37:0",
0
],
"sigmas": [
"37:3",
0
],
"latent_image": [
"37:6",
2
]
},
"class_type": "SamplerCustomAdvanced",
"_meta": {
"title": "SamplerCustomAdvanced"
}
}
}
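
To turn a queued graph into a pass/fail signal, a test can poll the history endpoint until the prompt completes. A hedged sketch, assuming the standard ComfyUI /history/{prompt_id} endpoint and the queue_workflow helper sketched earlier; the timeout and polling interval are arbitrary:

import json
import time
import urllib.request


def wait_for_prompt(prompt_id: str, host: str = "http://127.0.0.1:8188", timeout: float = 300.0) -> dict:
    # Poll /history until the prompt id appears with outputs, then return its record.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        with urllib.request.urlopen(f"{host}/history/{prompt_id}") as resp:
            history = json.load(resp)
        if prompt_id in history:
            return history[prompt_id]
        time.sleep(1.0)
    raise TimeoutError(f"prompt {prompt_id} did not finish within {timeout}s")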