mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-10 06:10:50 +08:00
Test improvements and fixes
- Move workflows to distinct JSON files.
- Add the comfy-org workflows for testing.
- Fix issues where workflows authored by Windows users would not be compatible with backends running on Linux or macOS, owing to path-separator differences. Because this codebase uses get_or_download wherever checkpoints, models, and other files are referenced, that is the only place where the comparison has to be handled gracefully for downloading. Validation code now converts backslashes to forward slashes, on the assumption that every value compared against such a list is intended to be a path rather than a literal symbol.
This commit is contained in:
parent 3dbdfcef5e
commit 2bc95c1711
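A minimal sketch of the separator normalization this change relies on; canonicalize_path below mirrors the helper added to comfy/component_model/files.py in this diff, and the checkpoint name in the example is hypothetical. Note that PurePath only treats backslashes as separators when it resolves to PureWindowsPath, i.e. on the Windows clients this fix targets:

from pathlib import PurePath, PureWindowsPath

def canonicalize_path(path) -> str | None:
    # Same shape as the helper in this diff: render the path with POSIX
    # forward slashes so names compare equal across operating systems.
    if path is None:
        return None
    return PurePath(path).as_posix()

# Hypothetical checkpoint name saved by a Windows client; PureWindowsPath
# is used explicitly so the conversion is visible on any OS.
print(PureWindowsPath("SDXL\\sd_xl_base_1.0.safetensors").as_posix())
# -> SDXL/sd_xl_base_1.0.safetensors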
@@ -9,6 +9,8 @@ import threading
import time
import traceback
import typing
from os import PathLike
from pathlib import PurePath
from typing import List, Optional, Tuple

import lazy_object_proxy
@@ -23,6 +25,7 @@ from ..component_model.executor_types import ExecutorToClientProgress, Validatio
    ValidationErrorDict, NodeErrorsDictValue, ValidationErrorExtraInfoDict, FormattedValue, RecursiveExecutionTuple, \
    RecursiveExecutionErrorDetails, RecursiveExecutionErrorDetailsInterrupted, ExecutionResult, DuplicateNodeError, \
    HistoryResultDict
from ..component_model.files import canonicalize_path
from ..component_model.queue_types import QueueTuple, HistoryEntry, QueueItem, MAXIMUM_HISTORY_SIZE, ExecutionStatus
from ..execution_context import new_execution_context, ExecutionContext
from ..nodes.package import import_all_nodes_in_workspace
@@ -748,6 +751,9 @@ def validate_inputs(prompt, item, validated: typing.Dict[str, ValidateInputsTupl
            continue

        if isinstance(type_input, list):
            if "\\" in val:
                # try to normalize paths for comparison purposes
                val = canonicalize_path(val)
            if val not in type_input:
                input_config = info
                list_info = ""
@@ -3,6 +3,7 @@ from __future__ import annotations
import json
import os
from importlib import resources as resources
from pathlib import PurePath
from typing import Optional

@@ -52,3 +53,9 @@ def get_package_as_path(package: str, subdir: Optional[str] = None) -> str:
    if traversable.is_dir():
        return str(traversable)
    raise ValueError(f"Could not find path for package={package}, subdir={subdir}, traversable={str(traversable)}")


def canonicalize_path(path: os.PathLike | str | None) -> str | None:
    if path is None:
        return None
    return PurePath(path).as_posix()
@@ -23,6 +23,7 @@ from .cli_args import args
from .cmd import folder_paths
from .cmd.folder_paths import add_model_folder_path, supported_pt_extensions
from .component_model.deprecation import _deprecate_method
from .component_model.files import canonicalize_path
from .interruption import InterruptProcessingException
from .model_downloader_types import CivitFile, HuggingFile, CivitModelsGetResponse, CivitFile_, Downloadable, UrlFile
from .utils import ProgressBar, comfy_tqdm
@@ -37,13 +38,14 @@ def get_filename_list_with_downloadable(folder_name: str, known_files: Optional[

    existing = frozenset(folder_paths.get_filename_list(folder_name))
    downloadable = frozenset() if args.disable_known_models else frozenset(str(f) for f in known_files)
    return sorted(list(existing | downloadable))
    return list(map(canonicalize_path, sorted(list(existing | downloadable))))


def get_or_download(folder_name: str, filename: str, known_files: Optional[List[Downloadable] | KnownDownloadables] = None) -> Optional[str]:
    if known_files is None:
        known_files = _get_known_models_for_folder_name(folder_name)

    filename = canonicalize_path(filename)
    path = folder_paths.get_full_path(folder_name, filename)

    if path is None and not args.disable_known_models:
@@ -52,7 +54,10 @@ def get_or_download(folder_name: str, filename: str, known_files: Optional[List[
        this_model_directory = folder_paths.get_folder_paths(folder_name)[0]
        known_file: Optional[HuggingFile | CivitFile] = None
        for candidate in known_files:
            if str(candidate) == filename or candidate.filename == filename or filename in candidate.alternate_filenames or filename == candidate.save_with_filename:
            if (canonicalize_path(str(candidate)) == filename
                    or canonicalize_path(candidate.filename) == filename
                    or filename in list(map(canonicalize_path, candidate.alternate_filenames))
                    or filename == canonicalize_path(candidate.save_with_filename)):
                known_file = candidate
                break
        if known_file is None:
@@ -133,14 +138,12 @@ def get_or_download(folder_name: str, filename: str, known_files: Optional[List[
            try:
                os.makedirs(this_model_directory, exist_ok=True)
                os.symlink(path, destination_link)
            except Exception:
            except Exception as exc_info:
                try:
                    os.link(path, destination_link)
                except Exception as exc_info:
                    link_exc_info = exc_info
                    if cache_hit:
                        shutil.copyfile(path, destination_link)
                except Exception as exc_info:
                    link_exc_info = exc_info
            if link_exc_info is not None:
                logging.error(f"Failed to link file with alternative download save name in a way that is compatible with Hugging Face caching {repr(known_file)}. If cache_hit={cache_hit} is True, the file was copied into the destination.", exc_info=exc_info)
@@ -155,7 +158,7 @@ def get_or_download(folder_name: str, filename: str, known_files: Optional[List[

            civit_file: CivitFile_
            for civit_file in chain.from_iterable(version['files'] for version in model_info['modelVersions']):
                if civit_file['name'] == filename:
                if canonicalize_path(civit_file['name']) == filename:
                    url = civit_file['downloadUrl']
                    break
        elif isinstance(known_file, UrlFile):
@@ -399,6 +402,7 @@ KNOWN_APPROX_VAES: Final[KnownDownloadables] = KnownDownloadables([
KNOWN_VAES: Final[KnownDownloadables] = KnownDownloadables([
    HuggingFile("stabilityai/sdxl-vae", "sdxl_vae.safetensors"),
    HuggingFile("stabilityai/sd-vae-ft-mse-original", "vae-ft-mse-840000-ema-pruned.safetensors"),
    HuggingFile("black-forest-labs/FLUX.1-schnell", "ae.sft"),
], folder_name="vae")

KNOWN_HUGGINGFACE_MODEL_REPOS: Final[Set[str]] = {
@@ -409,7 +413,9 @@ KNOWN_HUGGINGFACE_MODEL_REPOS: Final[Set[str]] = {
}

KNOWN_UNET_MODELS: Final[KnownDownloadables] = KnownDownloadables([
    HuggingFile("ByteDance/Hyper-SD", "Hyper-SDXL-1step-Unet-Comfyui.fp16.safetensors")
    HuggingFile("ByteDance/Hyper-SD", "Hyper-SDXL-1step-Unet-Comfyui.fp16.safetensors"),
    HuggingFile("black-forest-labs/FLUX.1-schnell", "flux1-schnell.sft"),
    HuggingFile("black-forest-labs/FLUX.1-dev", "flux1-dev.sft"),
], folder_name="unet")

KNOWN_CLIP_MODELS: Final[KnownDownloadables] = KnownDownloadables([
@@ -1,4 +1,5 @@
import node_helpers
from comfy import node_helpers


class CLIPTextEncodeFlux:
    @classmethod
@@ -249,7 +249,7 @@ class TransformersLoader(CustomNode):

        try:
            model = AutoModel.from_pretrained(**from_pretrained_kwargs)
        except:
        except Exception as exc_info:
            # not yet supported by automodel
            model = LlavaNextForConditionalGeneration.from_pretrained(**from_pretrained_kwargs)
@@ -1,5 +1,6 @@
pytest
pytest-asyncio
pytest-mock
websocket-client==1.6.1
PyInstaller
testcontainers
@@ -1,545 +1,14 @@
import importlib.resources
import json
from importlib.abc import Traversable

import pytest

from comfy.api.components.schema.prompt import Prompt
from comfy.client.embedded_comfy_client import EmbeddedComfyClient
from comfy.model_downloader import add_known_models, KNOWN_LORAS
from comfy.model_downloader_types import CivitFile

_workflows = {
    "hunyuandit_1": {
        "3": {
            "inputs": {
                "seed": 377072733774956,
                "steps": 1,
                "cfg": 6,
                "sampler_name": "euler",
                "scheduler": "sgm_uniform",
                "denoise": 1,
                "model": ["4", 0],
                "positive": ["6", 0],
                "negative": ["7", 0],
                "latent_image": ["5", 0]
            },
            "class_type": "KSampler",
            "_meta": {"title": "KSampler"}
        },
        "4": {
            "inputs": {"ckpt_name": "hunyuan_dit_1.2.safetensors"},
            "class_type": "CheckpointLoaderSimple",
            "_meta": {"title": "Load Checkpoint"}
        },
        "5": {
            "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
            "class_type": "EmptyLatentImage",
            "_meta": {"title": "Empty Latent Image"}
        },
        "6": {
            "inputs": {
                "text": "a bottle with a 彩虹星系 inside it on top of a wooden table on a snowy mountain top with the ocean and clouds in the background\n",
                "clip": ["4", 1]
            },
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Positive Prompt)"}
        },
        "7": {
            "inputs": {"text": "", "clip": ["4", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Negative Prompt)"}
        },
        "8": {
            "inputs": {"samples": ["3", 0], "vae": ["4", 2]},
            "class_type": "VAEDecode",
            "_meta": {"title": "VAE Decode"}
        },
        "9": {
            "inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]},
            "class_type": "SaveImage",
            "_meta": {"title": "Save Image"}
        }
    },
    "audio_1": {
        "14": {
            "inputs": {"ckpt_name": "stable_audio_open_1.0.safetensors"},
            "class_type": "CheckpointLoaderSimple",
            "_meta": {"title": "Load Checkpoint"}
        },
        "15": {
            "inputs": {"clip_name": "t5_base.safetensors", "type": "stable_audio"},
            "class_type": "CLIPLoader",
            "_meta": {"title": "Load CLIP"}
        },
        "16": {
            "inputs": {
                "text": "hard bop, upright bass, slappy bass, low frequencies, drum kit brushed hi-hat, snare with ghost notes, syncopated, groove",
                "clip": ["15", 0]
            },
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Prompt)"}
        },
        "17": {
            "inputs": {"text": "", "clip": ["15", 0]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Prompt)"}
        },
        "19": {
            "inputs": {"cfg": 7, "model": ["14", 0], "positive": ["16", 0], "negative": ["17", 0]},
            "class_type": "CFGGuider",
            "_meta": {"title": "CFGGuider"}
        },
        "21": {
            "inputs": {"noise_seed": 600769511872395},
            "class_type": "RandomNoise",
            "_meta": {"title": "RandomNoise"}
        },
        "22": {
            "inputs": {"noise": ["21", 0], "guider": ["19", 0], "sampler": ["29", 0], "sigmas": ["24", 0], "latent_image": ["30", 0]},
            "class_type": "SamplerCustomAdvanced",
            "_meta": {"title": "SamplerCustomAdvanced"}
        },
        "24": {
            "inputs": {"scheduler": "exponential", "steps": 20, "denoise": 1, "model": ["14", 0]},
            "class_type": "BasicScheduler",
            "_meta": {"title": "BasicScheduler"}
        },
        "26": {
            "inputs": {"samples": ["22", 1], "vae": ["14", 2]},
            "class_type": "VAEDecodeAudio",
            "_meta": {"title": "VAEDecodeAudio"}
        },
        "27": {
            "inputs": {"filename_prefix": "audio/objectobject", "audio": ["26", 0]},
            "class_type": "SaveAudio",
            "_meta": {"title": "SaveAudio"}
        },
        "29": {
            "inputs": {"version": "regular"},
            "class_type": "SamplerEulerCFGpp",
            "_meta": {"title": "SamplerEulerCFG++"}
        },
        "30": {
            "inputs": {"seconds": 47.6},
            "class_type": "EmptyLatentAudio",
            "_meta": {"title": "EmptyLatentAudio"}
        },
        "31": {
            "inputs": {"filename_prefix": "latents/ComfyUI", "samples": ["22", 1]},
            "class_type": "SaveLatent",
            "_meta": {"title": "SaveLatent"}
        }
    },
    "auraflow_1": {
        "1": {
            "inputs": {"ckpt_name": "aura_flow_0.1.safetensors"},
            "class_type": "CheckpointLoaderSimple",
            "_meta": {"title": "Load Checkpoint"}
        },
        "2": {
            "inputs": {"shift": 1.73, "model": ["1", 0]},
            "class_type": "ModelSamplingAuraFlow",
            "_meta": {"title": "ModelSamplingAuraFlow"}
        },
        "3": {
            "inputs": {
                "seed": 232240565010917,
                "steps": 25,
                "cfg": 3.5,
                "sampler_name": "uni_pc",
                "scheduler": "normal",
                "denoise": 1,
                "model": ["2", 0],
                "positive": ["4", 0],
                "negative": ["5", 0],
                "latent_image": ["6", 0]
            },
            "class_type": "KSampler",
            "_meta": {"title": "KSampler"}
        },
        "4": {
            "inputs": {"text": "close-up portrait of cat", "clip": ["1", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Prompt)"}
        },
        "5": {
            "inputs": {"text": "", "clip": ["1", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Prompt)"}
        },
        "6": {
            "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
            "class_type": "EmptyLatentImage",
            "_meta": {"title": "Empty Latent Image"}
        },
        "7": {
            "inputs": {"samples": ["3", 0], "vae": ["1", 2]},
            "class_type": "VAEDecode",
            "_meta": {"title": "VAE Decode"}
        },
        "8": {
            "inputs": {"filename_prefix": "ComfyUI", "images": ["7", 0]},
            "class_type": "SaveImage",
            "_meta": {"title": "Save Image"}
        }
    },
    "lora_1": {
        "3": {
            "inputs": {
                "seed": 851616030078638,
                "steps": 20,
                "cfg": 8,
                "sampler_name": "euler",
                "scheduler": "normal",
                "denoise": 1,
                "model": ["10", 0],
                "positive": ["6", 0],
                "negative": ["7", 0],
                "latent_image": ["5", 0]
            },
            "class_type": "KSampler",
            "_meta": {"title": "KSampler"}
        },
        "4": {
            "inputs": {"ckpt_name": "v1-5-pruned-emaonly.safetensors"},
            "class_type": "CheckpointLoaderSimple",
            "_meta": {"title": "Load Checkpoint"}
        },
        "5": {
            "inputs": {"width": 512, "height": 512, "batch_size": 1},
            "class_type": "EmptyLatentImage",
            "_meta": {"title": "Empty Latent Image"}
        },
        "6": {
            "inputs": {"text": "masterpiece best quality girl", "clip": ["10", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Prompt)"}
        },
        "7": {
            "inputs": {"text": "bad hands", "clip": ["10", 1]},
            "class_type": "CLIPTextEncode",
            "_meta": {"title": "CLIP Text Encode (Prompt)"}
        },
        "8": {
            "inputs": {"samples": ["3", 0], "vae": ["4", 2]},
            "class_type": "VAEDecode",
            "_meta": {"title": "VAE Decode"}
        },
        "9": {
            "inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]},
            "class_type": "SaveImage",
            "_meta": {"title": "Save Image"}
        },
        "10": {
            "inputs": {"lora_name": "epi_noiseoffset2.safetensors", "strength_model": 1, "strength_clip": 1, "model": ["4", 0], "clip": ["4", 1]},
            "class_type": "LoraLoader",
            "_meta": {"title": "Load LoRA"}
        }
    }
}
from . import workflows


@pytest.fixture(scope="module", autouse=False)
@@ -549,9 +18,15 @@ async def client(tmp_path_factory) -> EmbeddedComfyClient:
    yield client


def _prepare_for_workflows() -> dict[str, Traversable]:
    add_known_models("loras", KNOWN_LORAS, CivitFile(13941, 16576, "epi_noiseoffset2.safetensors"))

    return {f.name: f for f in importlib.resources.files(workflows).iterdir() if f.is_file() and f.name.endswith(".json")}


@pytest.mark.asyncio
@pytest.mark.parametrize("workflow_name, workflow", _workflows.items())
async def test_workflow(workflow_name: str, workflow: dict, has_gpu: bool, client: EmbeddedComfyClient):
@pytest.mark.parametrize("workflow_name, workflow_file", _prepare_for_workflows().items())
async def test_workflow(workflow_name: str, workflow_file: Traversable, has_gpu: bool, client: EmbeddedComfyClient):
    if not has_gpu:
        pytest.skip("requires gpu")

@@ -561,8 +36,9 @@ async def test_workflow(workflow_name: str, workflow: dict, has_gpu: bool, clien
    except (ImportError, ModuleNotFoundError):
        pytest.skip("requires torchaudio")

    workflow = json.loads(workflow_file.read_text())

    prompt = Prompt.validate(workflow)
    add_known_models("loras", KNOWN_LORAS, CivitFile(13941, 16576, "epi_noiseoffset2.safetensors"))
    # todo: add all the models we want to test a bit more elegantly
    outputs = await client.queue_prompt(prompt)
0 tests/inference/workflows/__init__.py Normal file

180 tests/inference/workflows/audio-0.json Normal file
@@ -0,0 +1,180 @@
{
  "14": {
    "inputs": {"ckpt_name": "stable_audio_open_1.0.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "15": {
    "inputs": {"clip_name": "t5_base.safetensors", "type": "stable_audio"},
    "class_type": "CLIPLoader",
    "_meta": {"title": "Load CLIP"}
  },
  "16": {
    "inputs": {
      "text": "hard bop, upright bass, slappy bass, low frequencies, drum kit brushed hi-hat, snare with ghost notes, syncopated, groove",
      "clip": ["15", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "17": {
    "inputs": {"text": "", "clip": ["15", 0]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "19": {
    "inputs": {"cfg": 7, "model": ["14", 0], "positive": ["16", 0], "negative": ["17", 0]},
    "class_type": "CFGGuider",
    "_meta": {"title": "CFGGuider"}
  },
  "21": {
    "inputs": {"noise_seed": 600769511872395},
    "class_type": "RandomNoise",
    "_meta": {"title": "RandomNoise"}
  },
  "22": {
    "inputs": {"noise": ["21", 0], "guider": ["19", 0], "sampler": ["29", 0], "sigmas": ["24", 0], "latent_image": ["30", 0]},
    "class_type": "SamplerCustomAdvanced",
    "_meta": {"title": "SamplerCustomAdvanced"}
  },
  "24": {
    "inputs": {"scheduler": "exponential", "steps": 20, "denoise": 1, "model": ["14", 0]},
    "class_type": "BasicScheduler",
    "_meta": {"title": "BasicScheduler"}
  },
  "26": {
    "inputs": {"samples": ["22", 1], "vae": ["14", 2]},
    "class_type": "VAEDecodeAudio",
    "_meta": {"title": "VAEDecodeAudio"}
  },
  "27": {
    "inputs": {"filename_prefix": "audio/objectobject", "audio": ["26", 0]},
    "class_type": "SaveAudio",
    "_meta": {"title": "SaveAudio"}
  },
  "29": {
    "inputs": {"version": "regular"},
    "class_type": "SamplerEulerCFGpp",
    "_meta": {"title": "SamplerEulerCFG++"}
  },
  "30": {
    "inputs": {"seconds": 47.6},
    "class_type": "EmptyLatentAudio",
    "_meta": {"title": "EmptyLatentAudio"}
  },
  "31": {
    "inputs": {"filename_prefix": "latents/ComfyUI", "samples": ["22", 1]},
    "class_type": "SaveLatent",
    "_meta": {"title": "SaveLatent"}
  }
}
120 tests/inference/workflows/auraflow-0.json Normal file
@@ -0,0 +1,120 @@
{
  "1": {
    "inputs": {"ckpt_name": "aura_flow_0.1.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "2": {
    "inputs": {"shift": 1.73, "model": ["1", 0]},
    "class_type": "ModelSamplingAuraFlow",
    "_meta": {"title": "ModelSamplingAuraFlow"}
  },
  "3": {
    "inputs": {
      "seed": 232240565010917,
      "steps": 25,
      "cfg": 3.5,
      "sampler_name": "uni_pc",
      "scheduler": "normal",
      "denoise": 1,
      "model": ["2", 0],
      "positive": ["4", 0],
      "negative": ["5", 0],
      "latent_image": ["6", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  },
  "4": {
    "inputs": {"text": "close-up portrait of cat", "clip": ["1", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "5": {
    "inputs": {"text": "", "clip": ["1", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "6": {
    "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
    "class_type": "EmptyLatentImage",
    "_meta": {"title": "Empty Latent Image"}
  },
  "7": {
    "inputs": {"samples": ["3", 0], "vae": ["1", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "8": {
    "inputs": {"filename_prefix": "ComfyUI", "images": ["7", 0]},
    "class_type": "SaveImage",
    "_meta": {"title": "Save Image"}
  }
}
107 tests/inference/workflows/default-0.json Normal file
@@ -0,0 +1,107 @@
{
  "3": {
    "inputs": {
      "seed": 156680208700286,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 1,
      "model": ["4", 0],
      "positive": ["6", 0],
      "negative": ["7", 0],
      "latent_image": ["5", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  },
  "4": {
    "inputs": {"ckpt_name": "v1-5-pruned-emaonly.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "5": {
    "inputs": {"width": 512, "height": 512, "batch_size": 1},
    "class_type": "EmptyLatentImage",
    "_meta": {"title": "Empty Latent Image"}
  },
  "6": {
    "inputs": {"text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,", "clip": ["4", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "7": {
    "inputs": {"text": "text, watermark", "clip": ["4", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "8": {
    "inputs": {"samples": ["3", 0], "vae": ["4", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "9": {
    "inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]},
    "class_type": "SaveImage",
    "_meta": {"title": "Save Image"}
  }
}
107 tests/inference/workflows/hunyuandit-0.json Normal file
@@ -0,0 +1,107 @@
{
  "3": {
    "inputs": {
      "seed": 377072733774956,
      "steps": 1,
      "cfg": 6,
      "sampler_name": "euler",
      "scheduler": "sgm_uniform",
      "denoise": 1,
      "model": ["4", 0],
      "positive": ["6", 0],
      "negative": ["7", 0],
      "latent_image": ["5", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  },
  "4": {
    "inputs": {"ckpt_name": "hunyuan_dit_1.2.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "5": {
    "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
    "class_type": "EmptyLatentImage",
    "_meta": {"title": "Empty Latent Image"}
  },
  "6": {
    "inputs": {
      "text": "a bottle with a 彩虹星系 inside it on top of a wooden table on a snowy mountain top with the ocean and clouds in the background\n",
      "clip": ["4", 1]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Positive Prompt)"}
  },
  "7": {
    "inputs": {"text": "", "clip": ["4", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Negative Prompt)"}
  },
  "8": {
    "inputs": {"samples": ["3", 0], "vae": ["4", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "9": {
    "inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]},
    "class_type": "SaveImage",
    "_meta": {"title": "Save Image"}
  }
}
126 tests/inference/workflows/sd-1.5-lora-0.json Normal file
@@ -0,0 +1,126 @@
{
  "3": {
    "inputs": {
      "seed": 851616030078638,
      "steps": 20,
      "cfg": 8,
      "sampler_name": "euler",
      "scheduler": "normal",
      "denoise": 1,
      "model": ["10", 0],
      "positive": ["6", 0],
      "negative": ["7", 0],
      "latent_image": ["5", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  },
  "4": {
    "inputs": {"ckpt_name": "v1-5-pruned-emaonly.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "5": {
    "inputs": {"width": 512, "height": 512, "batch_size": 1},
    "class_type": "EmptyLatentImage",
    "_meta": {"title": "Empty Latent Image"}
  },
  "6": {
    "inputs": {"text": "masterpiece best quality girl", "clip": ["10", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "7": {
    "inputs": {"text": "bad hands", "clip": ["10", 1]},
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "8": {
    "inputs": {"samples": ["3", 0], "vae": ["4", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "9": {
    "inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]},
    "class_type": "SaveImage",
    "_meta": {"title": "Save Image"}
  },
  "10": {
    "inputs": {"lora_name": "epi_noiseoffset2.safetensors", "strength_model": 1, "strength_clip": 1, "model": ["4", 0], "clip": ["4", 1]},
    "class_type": "LoraLoader",
    "_meta": {"title": "Load LoRA"}
  }
}
187 tests/inference/workflows/sd3-default-0.json Normal file
@@ -0,0 +1,187 @@
{
  "6": {
    "inputs": {
      "text": "a female character with long, flowing hair that appears to be made of ethereal, swirling patterns resembling the Northern Lights or Aurora Borealis. The background is dominated by deep blues and purples, creating a mysterious and dramatic atmosphere. The character's face is serene, with pale skin and striking features. She wears a dark-colored outfit with subtle patterns. The overall style of the artwork is reminiscent of fantasy or supernatural genres",
      "clip": ["11", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "11": {
    "inputs": {
      "clip_name1": "clip_g.safetensors",
      "clip_name2": "clip_l.safetensors",
      "clip_name3": "t5xxl_fp8_e4m3fn.safetensors"
    },
    "class_type": "TripleCLIPLoader",
    "_meta": {"title": "TripleCLIPLoader"}
  },
  "13": {
    "inputs": {"shift": 3, "model": ["252", 0]},
    "class_type": "ModelSamplingSD3",
    "_meta": {"title": "ModelSamplingSD3"}
  },
  "67": {
    "inputs": {"conditioning": ["71", 0]},
    "class_type": "ConditioningZeroOut",
    "_meta": {"title": "ConditioningZeroOut"}
  },
  "68": {
    "inputs": {"start": 0.1, "end": 1, "conditioning": ["67", 0]},
    "class_type": "ConditioningSetTimestepRange",
    "_meta": {"title": "ConditioningSetTimestepRange"}
  },
  "69": {
    "inputs": {"conditioning_1": ["68", 0], "conditioning_2": ["70", 0]},
    "class_type": "ConditioningCombine",
    "_meta": {"title": "Conditioning (Combine)"}
  },
  "70": {
    "inputs": {"start": 0, "end": 0.1, "conditioning": ["71", 0]},
    "class_type": "ConditioningSetTimestepRange",
    "_meta": {"title": "ConditioningSetTimestepRange"}
  },
  "71": {
    "inputs": {
      "text": "bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi",
      "clip": ["11", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Negative Prompt)"}
  },
  "135": {
    "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
    "class_type": "EmptySD3LatentImage",
    "_meta": {"title": "EmptySD3LatentImage"}
  },
  "231": {
    "inputs": {"samples": ["271", 0], "vae": ["252", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "233": {
    "inputs": {"filename_prefix": "ComfyUI", "images": ["231", 0]},
    "class_type": "SaveImage",
    "_meta": {"title": "Save Image"}
  },
  "252": {
    "inputs": {"ckpt_name": "sd3_medium.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "271": {
    "inputs": {
      "seed": 945512652412924,
      "steps": 28,
      "cfg": 4.5,
      "sampler_name": "dpmpp_2m",
      "scheduler": "sgm_uniform",
      "denoise": 1,
      "model": ["13", 0],
      "positive": ["6", 0],
      "negative": ["69", 0],
      "latent_image": ["135", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  }
}
189 tests/inference/workflows/sd3-multiprompt-0.json Normal file
@@ -0,0 +1,189 @@
{
  "11": {
    "inputs": {
      "clip_name1": "clip_g.safetensors",
      "clip_name2": "clip_l.safetensors",
      "clip_name3": "t5xxl_fp8_e4m3fn.safetensors"
    },
    "class_type": "TripleCLIPLoader",
    "_meta": {"title": "TripleCLIPLoader"}
  },
  "13": {
    "inputs": {"shift": 3, "model": ["252", 0]},
    "class_type": "ModelSamplingSD3",
    "_meta": {"title": "ModelSamplingSD3"}
  },
  "67": {
    "inputs": {"conditioning": ["71", 0]},
    "class_type": "ConditioningZeroOut",
    "_meta": {"title": "ConditioningZeroOut"}
  },
  "68": {
    "inputs": {"start": 0.1, "end": 1, "conditioning": ["67", 0]},
    "class_type": "ConditioningSetTimestepRange",
    "_meta": {"title": "ConditioningSetTimestepRange"}
  },
  "69": {
    "inputs": {"conditioning_1": ["68", 0], "conditioning_2": ["70", 0]},
    "class_type": "ConditioningCombine",
    "_meta": {"title": "Conditioning (Combine)"}
  },
  "70": {
    "inputs": {"start": 0, "end": 0.1, "conditioning": ["71", 0]},
    "class_type": "ConditioningSetTimestepRange",
    "_meta": {"title": "ConditioningSetTimestepRange"}
  },
  "71": {
    "inputs": {
      "text": "bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi",
      "clip": ["11", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Negative Prompt)"}
  },
  "135": {
    "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
    "class_type": "EmptySD3LatentImage",
    "_meta": {"title": "EmptySD3LatentImage"}
  },
  "231": {
    "inputs": {"samples": ["271", 0], "vae": ["252", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "233": {
    "inputs": {"images": ["231", 0]},
    "class_type": "PreviewImage",
    "_meta": {"title": "Preview Image"}
  },
  "252": {
    "inputs": {"ckpt_name": "sd3_medium.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "271": {
    "inputs": {
      "seed": 945512652412924,
      "steps": 28,
      "cfg": 4.5,
      "sampler_name": "dpmpp_2m",
      "scheduler": "sgm_uniform",
      "denoise": 1,
      "model": ["13", 0],
      "positive": ["273", 0],
      "negative": ["69", 0],
      "latent_image": ["135", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  },
  "273": {
    "inputs": {
      "clip_l": "the background is dominated by deep red and purples, creating a mysterious and dramatic atmosphere similar to a volcanic explosion",
      "clip_g": "the background is dominated by deep red and purples, creating a mysterious and dramatic atmosphere similar to a volcanic explosion",
      "t5xxl": "portrait of a female character with long, flowing hair that appears to be made of ethereal, swirling patterns resembling the Northern Lights or Aurora Borealis. Her face is serene, with pale skin and striking features. She wears a dark-colored outfit with subtle patterns. The overall style of the artwork is reminiscent of fantasy or supernatural genres\n",
      "empty_padding": "none",
      "clip": ["11", 0]
    },
    "class_type": "CLIPTextEncodeSD3",
    "_meta": {"title": "CLIPTextEncodeSD3"}
  }
}
186 tests/inference/workflows/sd3-single-t5-0.json Normal file
@@ -0,0 +1,186 @@
{
  "6": {
    "inputs": {
      "text": "a female character with long, flowing hair that appears to be made of ethereal, swirling patterns resembling the Northern Lights or Aurora Borealis. The background is dominated by deep blues and purples, creating a mysterious and dramatic atmosphere. The character's face is serene, with pale skin and striking features. She wears a dark-colored outfit with subtle patterns. The overall style of the artwork is reminiscent of fantasy or supernatural genres",
      "clip": ["272", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Prompt)"}
  },
  "13": {
    "inputs": {"shift": 3, "model": ["252", 0]},
    "class_type": "ModelSamplingSD3",
    "_meta": {"title": "ModelSamplingSD3"}
  },
  "67": {
    "inputs": {"conditioning": ["71", 0]},
    "class_type": "ConditioningZeroOut",
    "_meta": {"title": "ConditioningZeroOut"}
  },
  "68": {
    "inputs": {"start": 0.1, "end": 1, "conditioning": ["67", 0]},
    "class_type": "ConditioningSetTimestepRange",
    "_meta": {"title": "ConditioningSetTimestepRange"}
  },
  "69": {
    "inputs": {"conditioning_1": ["68", 0], "conditioning_2": ["70", 0]},
    "class_type": "ConditioningCombine",
    "_meta": {"title": "Conditioning (Combine)"}
  },
  "70": {
    "inputs": {"start": 0, "end": 0.1, "conditioning": ["71", 0]},
    "class_type": "ConditioningSetTimestepRange",
    "_meta": {"title": "ConditioningSetTimestepRange"}
  },
  "71": {
    "inputs": {
      "text": "bad quality, poor quality, doll, disfigured, jpg, toy, bad anatomy, missing limbs, missing fingers, 3d, cgi",
      "clip": ["272", 0]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {"title": "CLIP Text Encode (Negative Prompt)"}
  },
  "135": {
    "inputs": {"width": 1024, "height": 1024, "batch_size": 1},
    "class_type": "EmptySD3LatentImage",
    "_meta": {"title": "EmptySD3LatentImage"}
  },
  "231": {
    "inputs": {"samples": ["271", 0], "vae": ["252", 2]},
    "class_type": "VAEDecode",
    "_meta": {"title": "VAE Decode"}
  },
  "233": {
    "inputs": {"filename_prefix": "ComfyUI", "images": ["231", 0]},
    "class_type": "SaveImage",
    "_meta": {"title": "Save Image"}
  },
  "252": {
    "inputs": {"ckpt_name": "sd3_medium.safetensors"},
    "class_type": "CheckpointLoaderSimple",
    "_meta": {"title": "Load Checkpoint"}
  },
  "271": {
    "inputs": {
      "seed": 780289980632866,
      "steps": 28,
      "cfg": 4.5,
      "sampler_name": "dpmpp_2m",
      "scheduler": "sgm_uniform",
      "denoise": 1,
      "model": ["13", 0],
      "positive": ["6", 0],
      "negative": ["69", 0],
      "latent_image": ["135", 0]
    },
    "class_type": "KSampler",
    "_meta": {"title": "KSampler"}
  },
  "272": {
    "inputs": {"clip_name": "t5xxl_fp8_e4m3fn.safetensors", "type": "sd3"},
    "class_type": "CLIPLoader",
    "_meta": {"title": "Load CLIP"}
  }
}
254 tests/unit/test_validation.py Normal file
@@ -0,0 +1,254 @@
from contextvars import ContextVar
from typing import Final

import pytest
from pytest_mock import MockerFixture

from comfy.cli_args import args
from comfy.cmd.execution import validate_prompt, nodes

valid_prompt: Final[dict] = {
    "1": {
        "inputs": {
            "ckpt_name": "model1.safetensors",
        },
        "class_type": "CheckpointLoaderSimple",
    },
    "2": {
        "inputs": {
            "text": "a beautiful landscape",
            "clip": ["1", 1],
        },
        "class_type": "CLIPTextEncode",
    },
    "3": {
        "inputs": {
            "text": "ugly, deformed",
            "clip": ["1", 1],
        },
        "class_type": "CLIPTextEncode",
    },
    "4": {
        "inputs": {
            "width": 512,
            "height": 512,
            "batch_size": 1,
        },
        "class_type": "EmptyLatentImage",
    },
    "5": {
        "inputs": {
            "model": ["1", 0],
            "seed": 42,
            "steps": 20,
            "cfg": 7.0,
            "sampler_name": "euler",
            "scheduler": "normal",
            "positive": ["2", 0],
            "negative": ["3", 0],
            "latent_image": ["4", 0],
            "denoise": 1.0,
        },
        "class_type": "KSampler",
    },
    "6": {
        "inputs": {
            "samples": ["5", 0],
            "vae": ["1", 2],
        },
        "class_type": "VAEDecode",
    },
    "7": {
        "inputs": {
            "images": ["6", 0],
            "filename_prefix": "test_output",
        },
        "class_type": "SaveImage",
    },
}

known_models: ContextVar[list[str]] = ContextVar('known_models', default=[])


@pytest.fixture
def mock_nodes(mocker: MockerFixture):
    class MockCheckpointLoaderSimple:
        @staticmethod
        def INPUT_TYPES():
            models = known_models.get()
            return {
                "required": {
                    "ckpt_name": (models if models else ["model1.safetensors", "model2.safetensors"],),
                }
            }

        RETURN_TYPES = ("MODEL", "CLIP", "VAE")

    mocker.patch.dict(nodes.NODE_CLASS_MAPPINGS, {
        "CheckpointLoaderSimple": MockCheckpointLoaderSimple,
        "KSampler": type("KSampler", (), {
            "INPUT_TYPES": staticmethod(lambda: {
                "required": {
                    "model": ("MODEL",),
                    "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
                    "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
                    "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
                    "sampler_name": (["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_2m", "ddim", "uni_pc", "uni_pc_bh2"],),
                    "scheduler": (["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"],),
                    "positive": ("CONDITIONING",),
                    "negative": ("CONDITIONING",),
                    "latent_image": ("LATENT",),
                    "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                }
            }),
            "RETURN_TYPES": ("LATENT",),
        }),
        "CLIPTextEncode": type("CLIPTextEncode", (), {
            "INPUT_TYPES": staticmethod(lambda: {
                "required": {
                    "text": ("STRING", {"multiline": True}),
                    "clip": ("CLIP",),
                }
            }),
            "RETURN_TYPES": ("CONDITIONING",),
        }),
        "VAEDecode": type("VAEDecode", (), {
            "INPUT_TYPES": staticmethod(lambda: {
                "required": {
                    "samples": ("LATENT",),
                    "vae": ("VAE",),
                }
            }),
            "RETURN_TYPES": ("IMAGE",),
        }),
        "SaveImage": type("SaveImage", (), {
            "INPUT_TYPES": staticmethod(lambda: {
                "required": {
                    "images": ("IMAGE",),
                    "filename_prefix": ("STRING", {"default": "ComfyUI"}),
                }
            }),
            "RETURN_TYPES": (),
            "OUTPUT_NODE": True,
        }),
        "EmptyLatentImage": type("EmptyLatentImage", (), {
            "INPUT_TYPES": staticmethod(lambda: {
                "required": {
                    "width": ("INT", {"default": 512, "min": 16, "max": 8192}),
                    "height": ("INT", {"default": 512, "min": 16, "max": 8192}),
                    "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
                }
            }),
            "RETURN_TYPES": ("LATENT",),
        }),
    })


@pytest.fixture
def disable_known_models():
    original_value = args.disable_known_models
    args.disable_known_models = False
    yield
    args.disable_known_models = original_value


def test_validate_prompt_valid(mock_nodes):
    prompt = valid_prompt
    result = validate_prompt(prompt)
    assert result.valid
    assert result.error is None
    assert set(result.good_output_node_ids) == {"7"}


def test_validate_prompt_invalid_node(mock_nodes):
    prompt = {
        "1": {
            "inputs": {},
            "class_type": "NonExistentNode",
        },
    }

    result = validate_prompt(prompt)
    assert not result.valid
    assert result.error["type"] == "invalid_prompt"
    assert "NonExistentNode" in result.error["message"]


def test_prompt_has_no_output(mock_nodes):
    prompt = {
        "1": {
            "inputs": {},
            "class_type": "CheckpointLoaderSimple",
        },
    }

    result = validate_prompt(prompt)
    assert not result.valid
    assert result.error["type"] == "prompt_no_outputs"


def test_validate_prompt_invalid_input_type(mock_nodes):
    prompt = valid_prompt.copy()
    prompt["1"] = {
        "inputs": {
            "ckpt_name": 123,
        },
        "class_type": "CheckpointLoaderSimple",
    }

    result = validate_prompt(prompt)
    assert not result.valid
    assert result.error["type"] == "prompt_outputs_failed_validation"
    assert result.node_errors["1"]["errors"][0]["type"] == "exception_during_inner_validation"


@pytest.mark.parametrize("ckpt_name, known_model", [
    ("model\\with\\backslash.safetensors", "model/with/backslash.safetensors"),
    ("model/with/forward/slash.safetensors", "model/with/forward/slash.safetensors"),
    ("mixed\\slash/path.safetensors", "mixed/slash/path.safetensors"),
    ("model with spaces.safetensors", "model with spaces.safetensors"),
    ("model_with_underscores.safetensors", "model_with_underscores.safetensors"),
    ("C:\\Windows\\Temp\\model.safetensors", "C:/Windows/Temp/model.safetensors"),
    ("/home/user/models/model.safetensors", "/home/user/models/model.safetensors"),
])
def test_validate_prompt_path_variations(mock_nodes, disable_known_models, ckpt_name, known_model):
    token = known_models.set([known_model])

    try:
        prompt = valid_prompt.copy()
        prompt["1"] = {
            "inputs": {
                "ckpt_name": ckpt_name,
            },
            "class_type": "CheckpointLoaderSimple",
        }

        result = validate_prompt(prompt)
        assert result.valid, f"Failed for ckpt_name: {ckpt_name}, known_model: {known_model}"
        assert result.error is None, f"Error for ckpt_name: {ckpt_name}, known_model: {known_model}"
    finally:
        known_models.reset(token)


def test_validate_prompt_default_models(mock_nodes, disable_known_models):
    prompt = valid_prompt.copy()
    prompt["1"]["inputs"]["ckpt_name"] = "model1.safetensors"

    result = validate_prompt(prompt)
    assert result.valid, "Failed for default model list"
    assert result.error is None, "Error for default model list"


def test_validate_prompt_no_outputs(mock_nodes):
    prompt = {
        "1": {
            "inputs": {
                "ckpt_name": "model1.safetensors",
            },
            "class_type": "CheckpointLoaderSimple",
        },
    }

    result = validate_prompt(prompt)
    assert not result.valid
    assert result.error["type"] == "prompt_no_outputs"