Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-01-10 06:10:50 +08:00
- Experimental support for sage attention on Linux
- Diffusers loader now supports model indices
- Transformers model management now aligns with updates to ComfyUI
- Flux layers correctly use unbind
- Add float8 support for model loading in more places
- Experimental quantization approaches from Quanto and torchao
- Model upscaling now interacts better with memory management

This update also disables ROCm testing because it is not reliable enough on consumer hardware; ROCm does not really support the 7600.
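The "model indices" item refers to sharded diffusers checkpoints, which store an index JSON alongside the weight shards; the loader in the file below now matches that index file. A minimal illustration, using hypothetical paths and the standard Hugging Face shard naming (not taken from this repository):

# Hypothetical layout of a sharded ("indexed") diffusers checkpoint:
#   some_model/unet/diffusion_pytorch_model.safetensors.index.json
#   some_model/unet/diffusion_pytorch_model-00001-of-00002.safetensors
#   some_model/unet/diffusion_pytorch_model-00002-of-00002.safetensors
# Because no single-file checkpoint is present, first_file() falls through to the
# .index.json entry and that path is handed to sd.load_diffusion_model().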
from __future__ import annotations

import os

from . import sd, utils


def first_file(path, filenames) -> str | None:
    # Return the first candidate filename that exists under `path`, or None.
    for f in filenames:
        p = os.path.join(path, f)
        if os.path.exists(p):
            return str(p)
    return None


def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_directory=None, model_options=None):
    if model_options is None:
        model_options = {}

    # Checkpoint filenames in order of preference; the trailing .index.json entry
    # matches sharded (indexed) diffusers checkpoints.
    diffusion_model_names = [
        "diffusion_pytorch_model.fp16.safetensors",
        "diffusion_pytorch_model.safetensors",
        "diffusion_pytorch_model.fp16.bin",
        "diffusion_pytorch_model.bin",
        "diffusion_pytorch_model.safetensors.index.json"
    ]
    # The diffusion model may live in either a "unet" or a "transformer" subfolder.
    unet_path = first_file(os.path.join(model_path, "unet"), diffusion_model_names) or first_file(os.path.join(model_path, "transformer"), diffusion_model_names)
    vae_path = first_file(os.path.join(model_path, "vae"), diffusion_model_names)

    text_encoder_model_names = ["model.fp16.safetensors", "model.safetensors", "pytorch_model.fp16.bin", "pytorch_model.bin"]
    text_encoder1_path = first_file(os.path.join(model_path, "text_encoder"), text_encoder_model_names)
    text_encoder2_path = first_file(os.path.join(model_path, "text_encoder_2"), text_encoder_model_names)

    text_encoder_paths = [text_encoder1_path]
    if text_encoder2_path is not None:
        text_encoder_paths.append(text_encoder2_path)

    unet = None
    if unet_path is not None:
        unet = sd.load_diffusion_model(unet_path, model_options=model_options)

    clip = None
    textmodel_json_config1 = first_file(os.path.join(model_path, "text_encoder"), ["config.json"])
    if output_clip and not all(te is None for te in text_encoder_paths):
        clip = sd.load_clip(text_encoder_paths, embedding_directory=embedding_directory, textmodel_json_config=textmodel_json_config1)

    vae = None
    if output_vae and vae_path is not None:
        _sd = utils.load_torch_file(vae_path)
        vae = sd.VAE(sd=_sd)

    return unet, clip, vae
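For reference, a minimal usage sketch, assuming this module is importable as comfy.diffusers_load; the directory paths are placeholders, and the float8 dtype entry in model_options is an assumption based on the float8 loading mentioned in the change summary rather than something this file requires:

import torch

from comfy import diffusers_load

# Hypothetical diffusers-format model directory with unet/, vae/ and text_encoder/ subfolders.
model_dir = "/path/to/some-diffusers-model"

unet, clip, vae = diffusers_load.load_diffusers(
    model_dir,
    output_vae=True,
    output_clip=True,
    embedding_directory="models/embeddings",       # hypothetical embeddings folder
    model_options={"dtype": torch.float8_e4m3fn},  # assumed float8 weight dtype option; omit for defaults
)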