mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-05-08 08:12:34 +08:00
auto encoder from pt, clip from versions
This commit is contained in:
parent c02baed00f
commit 16e4945f30
@@ -53,6 +53,8 @@ class AutoencoderKL(torch.nn.Module):
         if path.lower().endswith(".safetensors"):
             import safetensors.torch
             sd = safetensors.torch.load_file(path, device="cpu")
+        elif path.lower().endswith(".pth") or path.lower().endswith(".pt"):
+            sd = torch.load(path, map_location='cpu')
         else:
             sd = torch.load(path, map_location="cpu")["state_dict"]
         keys = list(sd.keys())
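The hunk above extends the VAE checkpoint loader so that plain .pt/.pth files, which hold a bare state dict, load directly instead of failing on the ["state_dict"] lookup that .ckpt-style training checkpoints need. Pulled out of the class, the dispatch amounts to the following sketch (load_vae_state_dict is a hypothetical standalone helper, not a function in this commit):

import torch

def load_vae_state_dict(path):
    # Hypothetical helper mirroring the extension dispatch above.
    if path.lower().endswith(".safetensors"):
        import safetensors.torch
        # safetensors files are flat tensor dicts; load straight to CPU
        sd = safetensors.torch.load_file(path, device="cpu")
    elif path.lower().endswith(".pth") or path.lower().endswith(".pt"):
        # bare torch state dicts: the case this commit adds
        sd = torch.load(path, map_location="cpu")
    else:
        # .ckpt-style checkpoints nest the weights under "state_dict"
        sd = torch.load(path, map_location="cpu")["state_dict"]
    return sd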
comfy/sd.py (24 lines changed)
@@ -767,15 +767,23 @@ def load_style_model(ckpt_path):
     return StyleModel(model)
 
 
-def load_clip(ckpt_path, embedding_directory=None):
-    clip_data = utils.load_torch_file(ckpt_path)
-    config = {}
-    if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
-        config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
+def load_clip(ckpt_path, version = None, embedding_directory=None):
+    if version is not None:
+        assert version in ("openai/clip-vit-large-patch14", )
+        config = {}
+        if version == "openai/clip-vit-large-patch14":
+            config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
+        config["params"] = {"textmodel_path": version}
+        clip = CLIP(config=config, embedding_directory=embedding_directory)
     else:
-        config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
-    clip = CLIP(config=config, embedding_directory=embedding_directory)
-    clip.load_from_state_dict(clip_data)
+        clip_data = utils.load_torch_file(ckpt_path)
+        config = {}
+        if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
+            config['target'] = 'ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
+        else:
+            config['target'] = 'ldm.modules.encoders.modules.FrozenCLIPEmbedder'
+        clip = CLIP(config=config, embedding_directory=embedding_directory)
+        clip.load_from_state_dict(clip_data)
     return clip
 
 def load_checkpoint(config_path, ckpt_path, output_vae=True, output_clip=True, embedding_directory=None):
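load_clip now takes an optional version argument. With a version string, the loader builds a FrozenCLIPEmbedder config whose textmodel_path params entry names the pretrained model (so the embedder presumably fetches the weights itself), and load_from_state_dict is skipped; without one, it behaves as before, guessing the architecture from the checkpoint's keys. A sketch of the two call shapes, with placeholder paths:

# classic path: architecture guessed from the state dict keys
clip = load_clip(ckpt_path="models/clip/some_clip.safetensors")

# new path: no checkpoint file, weights resolved from the version name
clip = load_clip(ckpt_path=None, version="openai/clip-vit-large-patch14")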
nodes.py (15 lines changed)
@@ -373,6 +373,20 @@ class CLIPLoader:
         clip = comfy.sd.load_clip(ckpt_path=clip_path, embedding_directory=folder_paths.get_folder_paths("embeddings"))
         return (clip,)
 
+class CLIPVersionLoader:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "clip_version": (["openai/clip-vit-large-patch14"], ),
+                             }}
+    RETURN_TYPES = ("CLIP",)
+    FUNCTION = "load_clip"
+
+    CATEGORY = "loaders"
+
+    def load_clip(self, clip_version):
+        clip = comfy.sd.load_clip(ckpt_path=None, version=clip_version, embedding_directory=folder_paths.get_folder_paths("embeddings"))
+        return (clip,)
+
 class CLIPVisionLoader:
     @classmethod
     def INPUT_TYPES(s):
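CLIPVersionLoader follows the usual ComfyUI node contract: INPUT_TYPES declares one dropdown input (here a one-entry list of version strings), FUNCTION names the method the executor calls, and RETURN_TYPES says it yields a single CLIP. Driven by hand, outside the graph executor, the node behaves roughly like this simplified sketch (the real execution loop adds validation and caching):

node = CLIPVersionLoader()
fn = getattr(node, CLIPVersionLoader.FUNCTION)  # resolves to node.load_clip
(clip,) = fn(clip_version="openai/clip-vit-large-patch14")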
@@ -1065,6 +1079,7 @@ NODE_CLASS_MAPPINGS = {
     "LatentCrop": LatentCrop,
     "LoraLoader": LoraLoader,
     "CLIPLoader": CLIPLoader,
+    "CLIPVersionLoader": CLIPVersionLoader,
     "CLIPVisionEncode": CLIPVisionEncode,
     "StyleModelApply": StyleModelApply,
     "unCLIPConditioning": unCLIPConditioning,
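Adding the class to NODE_CLASS_MAPPINGS is what exposes it to the frontend, which refers to nodes by their string keys. A rough sketch of the lookup (not the actual server code):

node_cls = NODE_CLASS_MAPPINGS["CLIPVersionLoader"]
print(node_cls.INPUT_TYPES())
# {'required': {'clip_version': (['openai/clip-vit-large-patch14'],)}}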