Mirror of https://github.com/Comfy-Org/ComfyUI-Manager.git, synced 2025-12-18 02:43:00 +08:00
update model dB
support .zip file model
This commit is contained in:
parent 39e937e991
commit e941c75cee

@@ -15,6 +15,7 @@ import threading
 import json
 import time
 import yaml
+import zipfile

 glob_path = os.path.join(os.path.dirname(__file__)) # ComfyUI-Manager/glob
 sys.path.append(glob_path)
@@ -22,7 +23,7 @@ sys.path.append(glob_path)
 import cm_global
 from manager_util import *

-version = [2, 29]
+version = [2, 30]
 version_str = f"V{version[0]}.{version[1]}" + (f'.{version[2]}' if len(version) > 2 else '')

 comfyui_manager_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
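For reference, with the bumped version list the version_str expression above evaluates as follows (a quick standalone check, not part of the commit):

    version = [2, 30]
    version_str = f"V{version[0]}.{version[1]}" + (f'.{version[2]}' if len(version) > 2 else '')
    assert version_str == 'V2.30'  # a third element, e.g. [2, 30, 1], would give 'V2.30.1'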
@@ -1156,3 +1157,30 @@ async def extract_nodes_from_workflow(filepath, mode='local', channel_url='defau

     return used_exts, unknown_nodes

+
+def unzip(model_path):
+    if not os.path.exists(model_path):
+        print(f"[ComfyUI-Manager] unzip: File not found: {model_path}")
+        return False
+
+    base_dir = os.path.dirname(model_path)
+    filename = os.path.basename(model_path)
+    target_dir = os.path.join(base_dir, filename[:-4])
+
+    os.makedirs(target_dir, exist_ok=True)
+
+    with zipfile.ZipFile(model_path, 'r') as zip_ref:
+        zip_ref.extractall(target_dir)
+
+    # Check if there's only one directory inside the target directory
+    contents = os.listdir(target_dir)
+    if len(contents) == 1 and os.path.isdir(os.path.join(target_dir, contents[0])):
+        nested_dir = os.path.join(target_dir, contents[0])
+        # Move each file and sub-directory in the nested directory up to the target directory
+        for item in os.listdir(nested_dir):
+            shutil.move(os.path.join(nested_dir, item), os.path.join(target_dir, item))
+        # Remove the now empty nested directory
+        os.rmdir(nested_dir)
+
+    os.remove(model_path)
+    return True
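As an illustration of the new helper, a minimal usage sketch follows; the archive path is hypothetical, and unzip() is assumed to be in scope (the server-side hunks below call it as core.unzip):

    # Hypothetical example path; not taken from the commit.
    archive = '/ComfyUI/models/insightface/models/antelopev2.zip'

    if unzip(archive):
        # Contents now live in '/ComfyUI/models/insightface/models/antelopev2/':
        # extraction goes into a sibling directory named after the archive,
        # a single nested top-level folder is flattened, and the .zip is deleted.
        print('extracted')
    else:
        # unzip() returns False only when the archive path does not exist.
        print('archive not found')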
@@ -508,7 +508,12 @@ def check_model_installed(json_obj):
         item['installed'] = 'None'

         if model_path is not None:
-            if os.path.exists(model_path):
+            if model_path.endswith('.zip'):
+                if os.path.exists(model_path[:-4]):
+                    item['installed'] = 'True'
+                else:
+                    item['installed'] = 'False'
+            elif os.path.exists(model_path):
                 item['installed'] = 'True'
             else:
                 item['installed'] = 'False'
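The new branch above reports a .zip model as installed once its extracted directory exists; model_path[:-4] simply strips the '.zip' suffix to name that directory. A small illustration with a hypothetical path:

    model_path = '/ComfyUI/models/insightface/models/antelopev2.zip'  # hypothetical
    target_dir = model_path[:-4]  # '/ComfyUI/models/insightface/models/antelopev2'
    # item['installed'] becomes 'True' once target_dir exists (i.e. after unzip() has run),
    # and 'False' while only the .zip, or nothing at all, is present.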
@@ -915,10 +920,17 @@ async def install_model(request):
                     model_url.startswith('https://github.com') or model_url.startswith('https://huggingface.co') or model_url.startswith('https://heibox.uni-heidelberg.de')):
                 model_dir = get_model_dir(json_data)
                 download_url(model_url, model_dir, filename=json_data['filename'])
+                if model_path.endswith('.zip'):
+                    res = core.unzip(model_path)
+                else:
+                    res = True

-                return web.json_response({}, content_type='application/json')
+                if res:
+                    return web.json_response({}, content_type='application/json')
             else:
                 res = download_url_with_agent(model_url, model_path)
+                if res and model_path.endswith('.zip'):
+                    res = core.unzip(model_path)
         else:
             print(f"Model installation error: invalid model type - {json_data['type']}")

@@ -926,7 +938,6 @@ async def install_model(request):
             return web.json_response({}, content_type='application/json')
     except Exception as e:
         print(f"[ERROR] {e}", file=sys.stderr)
-        pass

     return web.Response(status=400)

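Taken together, the handler changes above make the success response depend on res: direct downloads unzip .zip models via core.unzip() after download_url(), agent downloads do the same after download_url_with_agent(), and a falsy res falls through to the HTTP 400 response at the end of the handler. A simplified, self-contained sketch of that outcome logic (fetch and unzip_ok are hypothetical stand-ins, not the real helpers):

    # Sketch only; not the literal handler code.
    def install_outcome(model_path: str, fetch, unzip_ok) -> int:
        res = fetch(model_path)
        if res and model_path.endswith('.zip'):
            res = unzip_ok(model_path)  # failed extraction makes res falsy
        return 200 if res else 400      # falsy res now ends in HTTP 400

    # Example: download succeeds but extraction fails -> 400
    print(install_outcome('antelopev2.zip', lambda p: True, lambda p: False))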
@@ -2571,6 +2571,16 @@
             "reference": "https://huggingface.co/InstantX/InstantID",
             "filename": "diffusion_pytorch_model.safetensors",
             "url": "https://huggingface.co/InstantX/InstantID/resolve/main/ControlNetModel/diffusion_pytorch_model.safetensors"
         },
+        {
+            "name": "MonsterMMORPG/insightface (for InstantID)",
+            "type": "insightface",
+            "base": "SDXL",
+            "save_path": "insightface/models",
+            "description": "MonsterMMORPG insightface model for cubiq/InstantID",
+            "reference": "https://huggingface.co/MonsterMMORPG/tools/tree/main",
+            "filename": "antelopev2.zip",
+            "url": "https://huggingface.co/MonsterMMORPG/tools/resolve/main/antelopev2.zip"
+        }
     ],
 }
@@ -1,5 +1,15 @@
 {
     "models": [
+        {
+            "name": "MonsterMMORPG/insightface (for InstantID)",
+            "type": "insightface",
+            "base": "SDXL",
+            "save_path": "insightface/models",
+            "description": "MonsterMMORPG insightface model for cubiq/InstantID",
+            "reference": "https://huggingface.co/MonsterMMORPG/tools/tree/main",
+            "filename": "antelopev2.zip",
+            "url": "https://huggingface.co/MonsterMMORPG/tools/resolve/main/antelopev2.zip"
+        },
         {
             "name": "InstantID/ip-adapter",
             "type": "instantid",
@@ -691,27 +701,6 @@
             "reference": "https://huggingface.co/guoyww/animatediff",
             "filename": "v3_sd15_adapter.ckpt",
             "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_adapter.ckpt"
         },
-
-        {
-            "name": "Segmind-Vega",
-            "type": "checkpoints",
-            "base": "segmind-vega",
-            "save_path": "checkpoints/segmind-vega",
-            "description": "The Segmind-Vega Model is a distilled version of the Stable Diffusion XL (SDXL), offering a remarkable 70% reduction in size and an impressive 100% speedup while retaining high-quality text-to-image generation capabilities.",
-            "reference": "https://huggingface.co/segmind/Segmind-Vega",
-            "filename": "segmind-vega.safetensors",
-            "url": "https://huggingface.co/segmind/Segmind-Vega/resolve/main/segmind-vega.safetensors"
-        },
-        {
-            "name": "Segmind-VegaRT - Latent Consistency Model (LCM) LoRA of Segmind-Vega",
-            "type": "lora",
-            "base": "segmind-vega",
-            "save_path": "loras/segmind-vega",
-            "description": "Segmind-VegaRT a distilled consistency adapter for Segmind-Vega that allows to reduce the number of inference steps to only between 2 - 8 steps.",
-            "reference": "https://huggingface.co/segmind/Segmind-VegaRT",
-            "filename": "pytorch_lora_weights.safetensors",
-            "url": "https://huggingface.co/segmind/Segmind-VegaRT/resolve/main/pytorch_lora_weights.safetensors"
-        }
     ],
 }