improved: Model-Manager now robustly recognizes installed models.

https://github.com/ltdrdata/ComfyUI-Manager/issues/1391
Dr.Lt.Data 2025-01-29 23:13:17 +09:00
parent 717ca1bb18
commit 6d771f77e6
6 changed files with 257 additions and 242 deletions

View File

@ -41,7 +41,7 @@ import manager_downloader
from node_package import InstalledNodePackage
version_code = [3, 9, 5]
version_code = [3, 11]
version_str = f"V{version_code[0]}.{version_code[1]}" + (f'.{version_code[2]}' if len(version_code) > 2 else '')

View File

@ -13,6 +13,7 @@ import shutil
import git
from datetime import datetime
from folder_paths import get_filename_list
from server import PromptServer
import manager_core as core
import manager_util
@ -54,6 +55,27 @@ import latent_preview
is_local_mode = args.listen.startswith('127.') or args.listen.startswith('local.')
model_dir_name_map = {
"checkpoints": "checkpoints",
"checkpoint": "checkpoints",
"unclip": "checkpoints",
"text_encoders": "text_encoders",
"clip": "text_encoders",
"vae": "vae",
"lora": "loras",
"t2i-adapter": "controlnet",
"t2i-style": "controlnet",
"controlnet": "controlnet",
"clip_vision": "clip_vision",
"gligen": "gligen",
"upscale": "upscale_models",
"embedding": "embeddings",
"embeddings": "embeddings",
"unet": "diffusion_models",
"diffusion_model": "diffusion_models",
}
def is_allowed_security_level(level):
if level == 'block':
return False
@ -274,43 +296,9 @@ def get_model_dir(data, show_log=False):
else:
base_model = os.path.join(models_base, data['save_path'])
else:
model_type = data['type']
if model_type == "checkpoints" or model_type == "checkpoint":
base_model = folder_paths.folder_names_and_paths["checkpoints"][0][0]
elif model_type == "unclip":
base_model = folder_paths.folder_names_and_paths["checkpoints"][0][0]
elif model_type == "clip" or model_type == "text_encoders":
if folder_paths.folder_names_and_paths.get("text_encoders"):
base_model = folder_paths.folder_names_and_paths["text_encoders"][0][0]
else:
if show_log:
logging.info("[ComfyUI-Manager] Your ComfyUI is outdated version.")
base_model = folder_paths.folder_names_and_paths["clip"][0][0] # outdated version
elif model_type == "VAE":
base_model = folder_paths.folder_names_and_paths["vae"][0][0]
elif model_type == "lora":
base_model = folder_paths.folder_names_and_paths["loras"][0][0]
elif model_type == "T2I-Adapter":
base_model = folder_paths.folder_names_and_paths["controlnet"][0][0]
elif model_type == "T2I-Style":
base_model = folder_paths.folder_names_and_paths["controlnet"][0][0]
elif model_type == "controlnet":
base_model = folder_paths.folder_names_and_paths["controlnet"][0][0]
elif model_type == "clip_vision":
base_model = folder_paths.folder_names_and_paths["clip_vision"][0][0]
elif model_type == "gligen":
base_model = folder_paths.folder_names_and_paths["gligen"][0][0]
elif model_type == "upscale":
base_model = folder_paths.folder_names_and_paths["upscale_models"][0][0]
elif model_type == "embeddings":
base_model = folder_paths.folder_names_and_paths["embeddings"][0][0]
elif model_type == "unet" or model_type == "diffusion_model":
if folder_paths.folder_names_and_paths.get("diffusion_models"):
base_model = folder_paths.folder_names_and_paths["diffusion_models"][0][1]
else:
if show_log:
logging.info("[ComfyUI-Manager] Your ComfyUI is outdated version.")
base_model = folder_paths.folder_names_and_paths["unet"][0][0] # outdated version
model_dir_name = model_dir_name_map.get(data['type'].lower())
if model_dir_name is not None:
base_model = folder_paths.folder_names_and_paths[model_dir_name][0][0]
else:
base_model = os.path.join(models_base, "etc")
@ -605,25 +593,50 @@ async def fetch_customnode_alternatives(request):
def check_model_installed(json_obj):
def process_model(item):
model_path = get_model_path(item, False)
item['installed'] = 'None'
def is_exists(model_dir_name, file_name):
dirs = folder_paths.get_folder_paths(model_dir_name)
for x in dirs:
if os.path.exists(os.path.join(x, file_name)):
return True
if model_path is not None:
if model_path.endswith('.zip'):
if os.path.exists(model_path[:-4]):
item['installed'] = 'True'
else:
item['installed'] = 'False'
elif os.path.exists(model_path):
return False
model_dir_names = ['checkpoints', 'loras', 'vae', 'text_encoders', 'diffusion_models', 'clip_vision', 'embeddings',
'diffusers', 'vae_approx', 'controlnet', 'gligen', 'upscale_models', 'hypernetworks',
'photomaker', 'classifiers']
total_models_files = set()
for x in model_dir_names:
for y in folder_paths.get_filename_list(x):
total_models_files.add(y)
def process_model_phase(item):
if 'diffusion' not in item['filename'] and 'pytorch' not in item['filename'] and 'model' not in item['filename']:
# non-general name case
if item['filename'] in total_models_files:
item['installed'] = 'True'
return
if item['save_path'] == 'default':
model_dir_name = model_dir_name_map.get(item['type'].lower())
if model_dir_name is not None:
item['installed'] = str(is_exists(model_dir_name, item['filename']))
else:
item['installed'] = 'False'
else:
model_dir_name = item['save_path'].split('/')[0]
if model_dir_name in folder_paths.folder_names_and_paths:
if is_exists(model_dir_name, item['filename']):
item['installed'] = 'True'
if 'installed' not in item:
fullpath = os.path.join(folder_paths.models_dir, item['save_path'], item['filename'])
item['installed'] = 'True' if os.path.exists(fullpath) else 'False'
with concurrent.futures.ThreadPoolExecutor(8) as executor:
for item in json_obj['models']:
executor.submit(process_model, item)
executor.submit(process_model_phase, item)
@routes.get("/externalmodel/getlist")
async def fetch_externalmodel_list(request):
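
Taken together, the added lines implement a two-phase installation check: filenames that are distinctive enough are matched against a set of every file ComfyUI already indexes under the known model folders, and only generic names (those containing 'model', 'diffusion', or 'pytorch') fall through to per-type and per-save_path existence checks. A consolidated sketch of that flow, with the diff's flattened indentation restored, assuming folder_paths and the model_dir_name_map above are in scope:

    import os
    import folder_paths  # provided by ComfyUI
    # model_dir_name_map is the type-to-folder table added earlier in this commit.

    model_dir_names = ['checkpoints', 'loras', 'vae', 'text_encoders', 'diffusion_models',
                       'clip_vision', 'embeddings', 'diffusers', 'vae_approx', 'controlnet',
                       'gligen', 'upscale_models', 'hypernetworks', 'photomaker', 'classifiers']

    # Every filename ComfyUI already indexes under the known model folders.
    total_models_files = {f for name in model_dir_names for f in folder_paths.get_filename_list(name)}

    def is_exists(model_dir_name, file_name):
        # True if the file exists in any registered path for that folder key.
        return any(os.path.exists(os.path.join(d, file_name))
                   for d in folder_paths.get_folder_paths(model_dir_name))

    def process_model_phase(item):
        # Phase 1: a distinctive filename can be matched directly against the index.
        if all(k not in item['filename'] for k in ('diffusion', 'pytorch', 'model')):
            if item['filename'] in total_models_files:
                item['installed'] = 'True'
                return
        # Phase 2: resolve the target folder from the type map or the save_path prefix.
        if item['save_path'] == 'default':
            model_dir_name = model_dir_name_map.get(item['type'].lower())
            item['installed'] = str(is_exists(model_dir_name, item['filename'])) if model_dir_name else 'False'
        else:
            model_dir_name = item['save_path'].split('/')[0]
            if model_dir_name in folder_paths.folder_names_and_paths and is_exists(model_dir_name, item['filename']):
                item['installed'] = 'True'
            if 'installed' not in item:
                # Phase 3: fall back to the literal save_path under the models directory.
                fullpath = os.path.join(folder_paths.models_dir, item['save_path'], item['filename'])
                item['installed'] = 'True' if os.path.exists(fullpath) else 'False'

As in the diff, the server then submits one such call per catalog entry to an eight-worker thread pool.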

View File

@ -310,7 +310,7 @@
},
{
"name": "negative_hand Negative Embedding",
"type": "embeddings",
"type": "embedding",
"base": "SD1.5",
"save_path": "embeddings/SD1.5",
"description": "If you use this embedding with negatives, you can solve the issue of damaging your hands.",
@ -321,7 +321,7 @@
},
{
"name": "bad_prompt Negative Embedding",
"type": "embeddings",
"type": "embedding",
"base": "SD1.5",
"save_path": "embeddings/SD1.5",
"description": "The idea behind this embedding was to somehow train the negative prompt as an embedding, thus unifying the basis of the negative prompt into one word or embedding.",
@ -332,7 +332,7 @@
},
{
"name": "Deep Negative V1.75",
"type": "embeddings",
"type": "embedding",
"base": "SD1.5",
"save_path": "embeddings/SD1.5",
"description": "These embedding learn what disgusting compositions and color patterns are, including faulty human anatomy, offensive color schemes, upside-down spatial structures, and more. Placing it in the negative can go a long way to avoiding these things.",
@ -343,7 +343,7 @@
},
{
"name": "EasyNegative",
"type": "embeddings",
"type": "embedding",
"base": "SD1.5",
"save_path": "embeddings/SD1.5",
"description": "This embedding should be used in your NEGATIVE prompt. Adjust the strength as desired (seems to scale well without any distortions), the strength required may vary based on positive and negative prompts.",
@ -488,7 +488,7 @@
"name": "stabilityai/Stable Cascade: text_encoder (CLIP)",
"type": "clip",
"base": "Stable Cascade",
"save_path": "clip/Stable-Cascade",
"save_path": "text_encoders/Stable-Cascade",
"description": "Stable Cascade: text_encoder",
"reference": "https://huggingface.co/stabilityai/stable-cascade",
"filename": "model.safetensors",
@ -937,7 +937,7 @@
"name": "google-t5/t5-base",
"type": "clip",
"base": "t5-base",
"save_path": "clip/t5-base",
"save_path": "text_encoders/t5-base",
"description": "T5 Base: Text-To-Text Transfer Transformer. This model can be loaded via CLIPLoader for Stable Audio workflow.",
"reference": "https://huggingface.co/google-t5/t5-base",
"filename": "model.safetensors",
@ -948,7 +948,7 @@
"name": "google-t5/t5-v1_1-xxl_encoderonly-fp16",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "The encoder part of https://huggingface.co/google/t5-v1_1-xxl, used with SD3 and Flux1",
"reference": "https://huggingface.co/mcmonkey/google_t5-v1_1-xxl_encoderonly",
"filename": "google_t5-v1_1-xxl_encoderonly-fp16.safetensors",
@ -959,7 +959,7 @@
"name": "google-t5/t5-v1_1-xxl_encoderonly-fp8_e4m3fn",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "The encoder part of https://huggingface.co/google/t5-v1_1-xxl, used with SD3 and Flux1",
"reference": "https://huggingface.co/mcmonkey/google_t5-v1_1-xxl_encoderonly",
"filename": "google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors",
@ -972,7 +972,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q3_K_L.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q3_K_L quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q3_K_L.gguf",
@ -983,7 +983,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q3_K_M.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q3_K_M quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q3_K_M.gguf",
@ -994,7 +994,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q3_K_S.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q3_K_S quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q3_K_S.gguf",
@ -1005,7 +1005,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q4_K_M.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q4_K_M quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q4_K_M.gguf",
@ -1016,7 +1016,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q4_K_S.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q4_K_S quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q4_K_S.gguf",
@ -1027,7 +1027,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q5_K_M.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q5_K_M quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q5_K_M.gguf",
@ -1038,7 +1038,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q5_K_S.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q5_K_S quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q5_K_S.gguf",
@ -1049,7 +1049,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q6_K.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q6_K quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q6_K.gguf",
@ -1060,7 +1060,7 @@
"name": "city96/t5-v1_1-xxl-encoder-Q8_0.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (Q8_0 quantized)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-Q8_0.gguf",
@ -1071,7 +1071,7 @@
"name": "city96/t5-v1_1-xxl-encoder-f16.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (float 16)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-f16.gguf",
@ -1082,7 +1082,7 @@
"name": "city96/t5-v1_1-xxl-encoder-f32.gguf",
"type": "clip",
"base": "t5",
"save_path": "clip/t5",
"save_path": "text_encoders/t5",
"description": "t5xxl Text Encoder GGUF model. (float 32)",
"reference": "https://huggingface.co/city96/t5-v1_1-xxl-encoder-gguf",
"filename": "t5-v1_1-xxl-encoder-f32.gguf",
@ -1721,17 +1721,6 @@
"url": "https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors",
"size": "774.5MB"
},
{
"name": "kohya-ss/ControlNet-LLLite: SDXL Canny Anime",
"type": "controlnet",
"base": "SDXL",
"save_path": "custom_nodes/ControlNet-LLLite-ComfyUI/models",
"description": "An extremely compactly designed controlnet model (a.k.a. ControlNet-LLLite). Note: The model structure is highly experimental and may be subject to change in the future.",
"reference": "https://huggingface.co/kohya-ss/controlnet-lllite",
"filename": "controllllite_v01032064e_sdxl_canny_anime.safetensors",
"url": "https://huggingface.co/kohya-ss/controlnet-lllite/resolve/main/controllllite_v01032064e_sdxl_canny_anime.safetensors",
"size": "46.2MB"
},
{
"name": "SDXL-controlnet: OpenPose (v2)",
"type": "controlnet",
@ -2814,39 +2803,6 @@
{
"name": "pfg-novel-n10.pt",
"type": "PFG",
"base": "SD1.5",
"save_path": "custom_nodes/pfg-ComfyUI/models",
"description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
"reference": "https://huggingface.co/furusu/PFG",
"filename": "pfg-novel-n10.pt",
"url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-novel-n10.pt",
"size": "23.6MB"
},
{
"name": "pfg-wd14-n10.pt",
"type": "PFG",
"base": "SD1.5",
"save_path": "custom_nodes/pfg-ComfyUI/models",
"description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
"reference": "https://huggingface.co/furusu/PFG",
"filename": "pfg-wd14-n10.pt",
"url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd14-n10.pt",
"size": "31.5MB"
},
{
"name": "pfg-wd15beta2-n10.pt",
"type": "PFG",
"base": "SD1.5",
"save_path": "custom_nodes/pfg-ComfyUI/models",
"description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
"reference": "https://huggingface.co/furusu/PFG",
"filename": "pfg-wd15beta2-n10.pt",
"url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd15beta2-n10.pt",
"size": "31.5MB"
},
{
"name": "GFPGANv1.4.pth",
"type": "GFPGAN",
@ -3012,50 +2968,6 @@
"url": "https://huggingface.co/InstantX/InstantID/resolve/main/ControlNetModel/diffusion_pytorch_model.safetensors",
"size": "2.50GB"
},
{
"name": "efficient_sam_s_cpu.jit [ComfyUI-YoloWorld-EfficientSAM]",
"type": "efficient_sam",
"base": "efficient_sam",
"save_path": "custom_nodes/ComfyUI-YoloWorld-EfficientSAM",
"description": "Install efficient_sam_s_cpu.jit into ComfyUI-YoloWorld-EfficientSAM",
"reference": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/tree/main",
"filename": "efficient_sam_s_cpu.jit",
"url": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/resolve/main/efficient_sam_s_cpu.jit",
"size": "106.0MB"
},
{
"name": "efficient_sam_s_gpu.jit [ComfyUI-YoloWorld-EfficientSAM]",
"type": "efficient_sam",
"base": "efficient_sam",
"save_path": "custom_nodes/ComfyUI-YoloWorld-EfficientSAM",
"description": "Install efficient_sam_s_gpu.jit into ComfyUI-YoloWorld-EfficientSAM",
"reference": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/tree/main",
"filename": "efficient_sam_s_gpu.jit",
"url": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/resolve/main/efficient_sam_s_gpu.jit",
"size": "106.0MB"
},
{
"name": "shape_predictor_68_face_landmarks.dat [Face Analysis]",
"type": "Shape Predictor",
"base": "DLIB",
"save_path": "custom_nodes/ComfyUI_FaceAnalysis/dlib",
"description": "To use the Face Analysis for ComfyUI custom node, installation of this model is needed.",
"reference": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/tree/main",
"filename": "shape_predictor_68_face_landmarks.dat",
"url": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/resolve/main/shape_predictor_68_face_landmarks.dat",
"size": "99.7MB"
},
{
"name": "dlib_face_recognition_resnet_model_v1.dat [Face Analysis]",
"type": "Face Recognition",
"base": "DLIB",
"save_path": "custom_nodes/ComfyUI_FaceAnalysis/dlib",
"description": "To use the Face Analysis for ComfyUI custom node, installation of this model is needed.",
"reference": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/tree/main",
"filename": "dlib_face_recognition_resnet_model_v1.dat",
"url": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/resolve/main/dlib_face_recognition_resnet_model_v1.dat",
"size": "22.5MB"
},
{
"name": "InstanceDiffusion/fusers",
"type": "InstanceDiffusion",
@ -3430,50 +3342,6 @@
"url": "https://huggingface.co/lllyasviel/ic-light/resolve/main/iclight_sd15_fcon.safetensors",
"size": "1.72GB"
},
{
"name": "ID-Animator/animator.ckpt",
"type": "ID-Animator",
"base": "SD1.5",
"save_path": "custom_nodes/ComfyUI_ID_Animator/models",
"description": "ID-Animator checkpoint",
"reference": "https://huggingface.co/spaces/ID-Animator/ID-Animator",
"filename": "animator.ckpt",
"url": "https://huggingface.co/spaces/ID-Animator/ID-Animator/resolve/main/animator.ckpt",
"size": "247.3MB"
},
{
"name": "ID-Animator/mm_sd_v15_v2.ckpt",
"type": "ID-Animator",
"base": "SD1.5",
"save_path": "custom_nodes/ComfyUI_ID_Animator/models/animatediff_models",
"description": "AnimateDiff checkpoint for ID-Animator",
"reference": "https://huggingface.co/spaces/ID-Animator/ID-Animator",
"filename": "mm_sd_v15_v2.ckpt",
"url": "https://huggingface.co/spaces/ID-Animator/ID-Animator/resolve/main/mm_sd_v15_v2.ckpt",
"size": "1.82GB"
},
{
"name": "ID-Animator/image_encoder",
"type": "ID-Animator",
"base": "SD1.5",
"save_path": "custom_nodes/ComfyUI_ID_Animator/models/image_encoder",
"description": "CLIP Image encoder for ID-Animator",
"reference": "https://huggingface.co/spaces/ID-Animator/ID-Animator",
"filename": "model.safetensors",
"url": "https://huggingface.co/spaces/ID-Animator/ID-Animator/resolve/main/image_encoder/model.safetensors",
"size": "2.53GB"
},
{
"name": "TencentARC/CustomNet",
"type": "CustomNet",
"base": "CustomNet",
"save_path": "custom_nodes/ComfyUI_CustomNet/pretrain",
"description": "CustomNet pretrained model for ComfyUI_CustomNet",
"reference": "https://huggingface.co/TencentARC/CustomNet/tree/main",
"filename": "customnet_v1.pt",
"url": "https://huggingface.co/TencentARC/CustomNet/resolve/main/customnet_v1.pt",
"size": "5.71GB"
},
{
"name": "TTPlanet/TTPLanet_SDXL_Controlnet_Tile_Realistic v2 (fp16)",
"type": "controlnet",
@ -3562,17 +3430,6 @@
"url": "https://huggingface.co/ViperYX/RGT/resolve/main/RGT_S/RGT_S_x4.pth",
"size": "136.0MB"
},
{
"name": "Doubiiu/ToonCrafter model checkpoint",
"type": "checkpoint",
"base": "ToonCrafter",
"save_path": "custom_nodes/ComfyUI-ToonCrafter/ToonCrafter/checkpoints/tooncrafter_512_interp_v1",
"description": "ToonCrafter checkpoint model for ComfyUI-ToonCrafter",
"reference": "https://huggingface.co/Doubiiu/ToonCrafter/tree/main",
"filename": "model.ckpt",
"url": "https://huggingface.co/Doubiiu/ToonCrafter/resolve/main/model.ckpt",
"size": "10.5GB"
},
{
"name": "InstantX/FLUX.1-dev Controlnet (Union)",
@ -4465,23 +4322,11 @@
"size": "12.7GB"
},
{
"name": "BAAI/SegGPT",
"type": "SegGPT",
"base": "SegGPT",
"save_path": "custom_nodes/comfyui-SegGPT",
"description": "SegGPT",
"reference": "https://huggingface.co/BAAI/SegGPT",
"filename": "seggpt_vit_large.pth",
"url": "https://huggingface.co/BAAI/SegGPT/resolve/main/seggpt_vit_large.pth",
"size": "1.48GB"
},
{
"name": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors [Long CLIP L]",
"type": "clip",
"base": "clip",
"save_path": "clip/long_clip",
"save_path": "text_encoders/long_clip",
"description": "Greatly improved TEXT + Detail (as CLIP-L for Flux.1)",
"reference": "https://huggingface.co/zer0int",
"filename": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
@ -4492,7 +4337,7 @@
"name": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors [Long CLIP L]",
"type": "clip",
"base": "clip",
"save_path": "clip/long_clip",
"save_path": "text_encoders/long_clip",
"description": "Greatly improved TEXT + Detail (as CLIP-L for Flux.1)",
"reference": "https://huggingface.co/zer0int",
"filename": "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
@ -4793,6 +4638,52 @@
"filename": "ip_adapter.safetensors",
"url": "https://huggingface.co/XLabs-AI/flux-ip-adapter/resolve/main/ip_adapter.safetensors",
"size": "982MB"
},
{
"name": "efficient_sam_s_cpu.jit [ComfyUI-YoloWorld-EfficientSAM]",
"type": "efficient_sam",
"base": "efficient_sam",
"save_path": "yolo_world",
"description": "Install efficient_sam_s_cpu.jit into ComfyUI-YoloWorld-EfficientSAM",
"reference": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/tree/main",
"filename": "efficient_sam_s_cpu.jit",
"url": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/resolve/main/efficient_sam_s_cpu.jit",
"size": "106.0MB"
},
{
"name": "efficient_sam_s_gpu.jit [ComfyUI-YoloWorld-EfficientSAM]",
"type": "efficient_sam",
"base": "efficient_sam",
"save_path": "yolo_world",
"description": "Install efficient_sam_s_gpu.jit into ComfyUI-YoloWorld-EfficientSAM",
"reference": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/tree/main",
"filename": "efficient_sam_s_gpu.jit",
"url": "https://huggingface.co/camenduru/YoloWorld-EfficientSAM/resolve/main/efficient_sam_s_gpu.jit",
"size": "106.0MB"
},
{
"name": "TencentARC/CustomNet V1",
"type": "CustomNet",
"base": "CustomNet",
"save_path": "checkpoints/customnet",
"description": "CustomNet pretrained model for ComfyUI_CustomNet",
"reference": "https://huggingface.co/TencentARC/CustomNet/tree/main",
"filename": "customnet_v1.pt",
"url": "https://huggingface.co/TencentARC/CustomNet/resolve/main/customnet_v1.pt",
"size": "5.71GB"
},
{
"name": "TencentARC/CustomNet Inpaint V1",
"type": "CustomNet",
"base": "CustomNet",
"save_path": "checkpoints/customnet",
"description": "CustomNet Inpaint pretrained model for ComfyUI_CustomNet",
"reference": "https://huggingface.co/TencentARC/CustomNet/tree/main",
"filename": "customnet_inpaint_v1.pt",
"url": "https://huggingface.co/TencentARC/CustomNet/resolve/main/customnet_inpaint_v1.pt",
"size": "5.71GB"
}
]
}

View File

@ -1,3 +1,126 @@
{
"models": []
"models": [
{
"name": "pfg-novel-n10.pt",
"type": "PFG",
"base": "SD1.5",
"save_path": "custom_nodes/pfg-ComfyUI/models",
"description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
"reference": "https://huggingface.co/furusu/PFG",
"filename": "pfg-novel-n10.pt",
"url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-novel-n10.pt",
"size": "23.6MB"
},
{
"name": "pfg-wd14-n10.pt",
"type": "PFG",
"base": "SD1.5",
"save_path": "custom_nodes/pfg-ComfyUI/models",
"description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
"reference": "https://huggingface.co/furusu/PFG",
"filename": "pfg-wd14-n10.pt",
"url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd14-n10.pt",
"size": "31.5MB"
},
{
"name": "pfg-wd15beta2-n10.pt",
"type": "PFG",
"base": "SD1.5",
"save_path": "custom_nodes/pfg-ComfyUI/models",
"description": "Pressing 'install' directly downloads the model from the pfg-ComfyUI/models extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
"reference": "https://huggingface.co/furusu/PFG",
"filename": "pfg-wd15beta2-n10.pt",
"url": "https://huggingface.co/furusu/PFG/resolve/main/pfg-wd15beta2-n10.pt",
"size": "31.5MB"
},
{
"name": "shape_predictor_68_face_landmarks.dat [Face Analysis]",
"type": "Shape Predictor",
"base": "DLIB",
"save_path": "custom_nodes/comfyui_faceanalysis/dlib",
"description": "To use the Face Analysis for ComfyUI custom node, installation of this model is needed.",
"reference": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/tree/main",
"filename": "shape_predictor_68_face_landmarks.dat",
"url": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/resolve/main/shape_predictor_68_face_landmarks.dat",
"size": "99.7MB"
},
{
"name": "dlib_face_recognition_resnet_model_v1.dat [Face Analysis]",
"type": "Face Recognition",
"base": "DLIB",
"save_path": "custom_nodes/comfyui_faceanalysis/dlib",
"description": "To use the Face Analysis for ComfyUI custom node, installation of this model is needed.",
"reference": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/tree/main",
"filename": "dlib_face_recognition_resnet_model_v1.dat",
"url": "https://huggingface.co/matt3ounstable/dlib_predictor_recognition/resolve/main/dlib_face_recognition_resnet_model_v1.dat",
"size": "22.5MB"
},
{
"name": "ID-Animator/animator.ckpt",
"type": "ID-Animator",
"base": "SD1.5",
"save_path": "custom_nodes/comfyui_id_animator/models",
"description": "ID-Animator checkpoint",
"reference": "https://huggingface.co/spaces/ID-Animator/ID-Animator",
"filename": "animator.ckpt",
"url": "https://huggingface.co/spaces/ID-Animator/ID-Animator/resolve/main/animator.ckpt",
"size": "247.3MB"
},
{
"name": "ID-Animator/mm_sd_v15_v2.ckpt",
"type": "ID-Animator",
"base": "SD1.5",
"save_path": "custom_nodes/comfyui_id_animator/models/animatediff_models",
"description": "AnimateDiff checkpoint for ID-Animator",
"reference": "https://huggingface.co/spaces/ID-Animator/ID-Animator",
"filename": "mm_sd_v15_v2.ckpt",
"url": "https://huggingface.co/spaces/ID-Animator/ID-Animator/resolve/main/mm_sd_v15_v2.ckpt",
"size": "1.82GB"
},
{
"name": "ID-Animator/image_encoder",
"type": "ID-Animator",
"base": "SD1.5",
"save_path": "custom_nodes/comfyui_id_animator/models/image_encoder",
"description": "CLIP Image encoder for ID-Animator",
"reference": "https://huggingface.co/spaces/ID-Animator/ID-Animator",
"filename": "model.safetensors",
"url": "https://huggingface.co/spaces/ID-Animator/ID-Animator/resolve/main/image_encoder/model.safetensors",
"size": "2.53GB"
},
{
"name": "Doubiiu/ToonCrafter model checkpoint",
"type": "checkpoint",
"base": "ToonCrafter",
"save_path": "custom_nodes/comfyui-tooncrafter/ToonCrafter/checkpoints/tooncrafter_512_interp_v1",
"description": "ToonCrafter checkpoint model for ComfyUI-ToonCrafter",
"reference": "https://huggingface.co/Doubiiu/ToonCrafter/tree/main",
"filename": "model.ckpt",
"url": "https://huggingface.co/Doubiiu/ToonCrafter/resolve/main/model.ckpt",
"size": "10.5GB"
},
{
"name": "BAAI/SegGPT",
"type": "SegGPT",
"base": "SegGPT",
"save_path": "custom_nodes/comfyui-seggpt",
"description": "SegGPT",
"reference": "https://huggingface.co/BAAI/SegGPT",
"filename": "seggpt_vit_large.pth",
"url": "https://huggingface.co/BAAI/SegGPT/resolve/main/seggpt_vit_large.pth",
"size": "1.48GB"
},
{
"name": "kohya-ss/ControlNet-LLLite: SDXL Canny Anime",
"type": "controlnet",
"base": "SDXL",
"save_path": "custom_nodes/ControlNet-LLLite-ComfyUI/models",
"description": "An extremely compactly designed controlnet model (a.k.a. ControlNet-LLLite). Note: The model structure is highly experimental and may be subject to change in the future.",
"reference": "https://huggingface.co/kohya-ss/controlnet-lllite",
"filename": "controllllite_v01032064e_sdxl_canny_anime.safetensors",
"url": "https://huggingface.co/kohya-ss/controlnet-lllite/resolve/main/controllllite_v01032064e_sdxl_canny_anime.safetensors",
"size": "46.2MB"
}
]
}

View File

@ -575,7 +575,7 @@
"name": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors [Long CLIP L]",
"type": "clip",
"base": "clip",
"save_path": "clip/long_clip",
"save_path": "text_encoders/long_clip",
"description": "Greatly improved TEXT + Detail (as CLIP-L for Flux.1)",
"reference": "https://huggingface.co/zer0int",
"filename": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors",
@ -586,7 +586,7 @@
"name": "ViT-L-14-TEXT-detail-improved-hiT-GmP-HF.safetensors [Long CLIP L]",
"type": "clip",
"base": "clip",
"save_path": "clip/long_clip",
"save_path": "text_encoders/long_clip",
"description": "Greatly improved TEXT + Detail (as CLIP-L for Flux.1)",
"reference": "https://huggingface.co/zer0int",
"filename": "ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
@ -629,18 +629,6 @@
"size": "1.39GB"
},
{
"name": "BAAI/SegGPT",
"type": "SegGPT",
"base": "SegGPT",
"save_path": "custom_nodes/comfyui-SegGPT",
"description": "SegGPT",
"reference": "https://huggingface.co/BAAI/SegGPT",
"filename": "seggpt_vit_large.pth",
"url": "https://huggingface.co/BAAI/SegGPT/resolve/main/seggpt_vit_large.pth",
"size": "1.48GB"
},
{
"name": "DMD2 LoRA (4steps)",
"type": "lora",

View File

@ -1,7 +1,7 @@
[project]
name = "comfyui-manager"
description = "ComfyUI-Manager provides features to install and manage custom nodes for ComfyUI, as well as various functionalities to assist with ComfyUI."
version = "3.10"
version = "3.11"
license = { file = "LICENSE.txt" }
dependencies = ["GitPython", "PyGithub", "matrix-client==0.4.0", "transformers", "huggingface-hub>0.20", "typer", "rich", "typing-extensions"]