{ "models": [ { "name": "Comfy-Org/Wan2.1 i2v 480p 14B (bf16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 480p 14B (bf16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_480p_14B_bf16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_bf16.safetensors", "size": "32.8GB" }, { "name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 480p 14B (fp16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_480p_14B_fp16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp16.safetensors", "size": "32.8GB" }, { "name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp8_e4m3fn)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 480p 14B (fp8_e4m3fn)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors", "size": "16.4GB" }, { "name": "Comfy-Org/Wan2.1 i2v 480p 14B (fp8_scaled)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 480p 14B (fp8_scaled)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_scaled.safetensors", "size": "16.4GB" }, { "name": "Comfy-Org/Wan2.1 i2v 720p 14B (bf16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 720p 14B (bf16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_720p_14B_bf16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_bf16.safetensors", "size": "32.8GB" }, { "name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 720p 14B (fp16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_720p_14B_fp16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp16.safetensors", "size": "32.8GB" }, { "name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp8_e4m3fn)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 720p 14B (fp8_e4m3fn)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp8_e4m3fn.safetensors", "size": 
"16.4GB" }, { "name": "Comfy-Org/Wan2.1 i2v 720p 14B (fp8_scaled)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for i2v 720p 14B (fp8_scaled)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_i2v_720p_14B_fp8_scaled.safetensors", "size": "16.4GB" }, { "name": "Comfy-Org/clip_vision_h.safetensors", "type": "clip_vision", "base": "clip_vision_h", "save_path": "clip_vision", "description": "clip_vision_h model for Wan2.1", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "clip_vision_h.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/clip_vision/clip_vision_h.safetensors", "size": "1.26GB" }, { "name": "Comfy-Org/Wan2.1 t2v 1.3B (bf16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for t2v 1.3B (bf16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_t2v_1.3B_bf16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors", "size": "2.84GB" }, { "name": "Comfy-Org/Wan2.1 t2v 1.3B (fp16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for t2v 1.3B (fp16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_t2v_1.3B_fp16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_1.3B_fp16.safetensors", "size": "2.84GB" }, { "name": "Comfy-Org/Wan2.1 t2v 14B (bf16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for t2v 14B (bf16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_t2v_14B_bf16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_bf16.safetensors", "size": "28.6GB" }, { "name": "Comfy-Org/Wan2.1 t2v 14B (fp16)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for t2v 14B (fp16)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_t2v_14B_fp16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp16.safetensors", "size": "28.6GB" }, { "name": "Comfy-Org/Wan2.1 t2v 14B (fp8_e4m3fn)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 difussion model for t2v 14B (fp8_e4m3fn)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp8_e4m3fn.safetensors", "size": "14.3GB" }, { "name": "Comfy-Org/Wan2.1 t2v 14B (fp8_scaled)", "type": "diffusion_model", "base": "Wan2.1", "save_path": "diffusion_models/Wan2.1", "description": "Wan2.1 
diffusion model for t2v 14B (fp8_scaled)", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan2.1_t2v_14B_fp8_scaled.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/diffusion_models/wan2.1_t2v_14B_fp8_scaled.safetensors", "size": "14.3GB" }, { "name": "Comfy-Org/Wan2.1 VAE", "type": "vae", "base": "Wan2.1", "save_path": "vae", "description": "Wan2.1 VAE model", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "wan_2.1_vae.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/vae/wan_2.1_vae.safetensors", "size": "254MB" }, { "name": "Comfy-Org/umt5_xxl_fp16.safetensors", "type": "clip", "base": "umt5_xxl", "save_path": "text_encoders", "description": "umt5_xxl_fp16 text encoder for Wan2.1", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "umt5_xxl_fp16.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp16.safetensors", "size": "11.4GB" }, { "name": "Comfy-Org/umt5_xxl_fp8_e4m3fn_scaled.safetensors", "type": "clip", "base": "umt5_xxl", "save_path": "text_encoders", "description": "umt5_xxl_fp8_e4m3fn_scaled text encoder for Wan2.1", "reference": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged", "filename": "umt5_xxl_fp8_e4m3fn_scaled.safetensors", "url": "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/resolve/main/split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", "size": "6.74GB" }, { "name": "Comfy-Org/hunyuan_video_image_to_video_720p_bf16.safetensors", "type": "diffusion_model", "base": "Hunyuan Video", "save_path": "diffusion_models/hunyuan_video", "description": "Hunyuan Video Image2Video diffusion model. Repackaged version.", "reference": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged", "filename": "hunyuan_video_image_to_video_720p_bf16.safetensors", "url": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/resolve/main/split_files/diffusion_models/hunyuan_video_image_to_video_720p_bf16.safetensors", "size": "25.6GB" }, { "name": "Comfy-Org/llava_llama3_vision.safetensors", "type": "clip_vision", "base": "LLaVA-Llama-3", "save_path": "clip_vision", "description": "llava_llama3_vision clip vision model. This is required for using Hunyuan Video Image2Video.", "reference": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged", "filename": "llava_llama3_vision.safetensors", "url": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/resolve/main/split_files/clip_vision/llava_llama3_vision.safetensors", "size": "649MB" }, { "name": "LTX-Video 2B v0.9.5 Checkpoint", "type": "checkpoint", "base": "LTX-Video", "save_path": "checkpoints/LTXV", "description": "LTX-Video is the first DiT-based video generation model capable of generating high-quality videos in real-time. It produces 24 FPS videos at a 768x512 resolution faster than they can be watched. 
Trained on a large-scale dataset of diverse videos, the model generates high-resolution videos with realistic and varied content.", "reference": "https://huggingface.co/Lightricks/LTX-Video", "filename": "ltx-video-2b-v0.9.5.safetensors", "url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltx-video-2b-v0.9.5.safetensors", "size": "6.34GB" }, { "name": "kolors/vae/diffusion_pytorch_model.fp16.safetensors", "type": "VAE", "base": "Kolors", "save_path": "vae/kolors", "description": "Kolors VAE", "reference": "https://huggingface.co/Kwai-Kolors/Kolors", "filename": "diffusion_pytorch_model.fp16.safetensors", "url": "https://huggingface.co/Kwai-Kolors/Kolors/resolve/main/vae/diffusion_pytorch_model.fp16.safetensors", "size": "167MB" }, { "name": "kolors/vae/diffusion_pytorch_model.safetensors", "type": "VAE", "base": "Kolors", "save_path": "vae/kolors", "description": "Kolors VAE", "reference": "https://huggingface.co/Kwai-Kolors/Kolors", "filename": "diffusion_pytorch_model.safetensors", "url": "https://huggingface.co/Kwai-Kolors/Kolors/resolve/main/vae/diffusion_pytorch_model.safetensors", "size": "335MB" }, { "name": "deepseek-ai/Janus-Pro-1B", "type": "Janus-Pro", "base": "Janus-Pro", "save_path": "Janus-Pro", "description": "[SNAPSHOT] Janus-Pro-1B model.[w/You cannot download this item on ComfyUI-Manager versions below V3.18]", "reference": "https://huggingface.co/deepseek-ai/Janus-Pro-1B", "filename": "", "url": "deepseek-ai/Janus-Pro-1B", "size": "7.8GB" }, { "name": "deepseek-ai/Janus-Pro-7B", "type": "Janus-Pro", "base": "Janus-Pro", "save_path": "Janus-Pro", "description": "[SNAPSHOT] Janus-Pro-7B model.[w/You cannot download this item on ComfyUI-Manager versions below V3.18]", "reference": "https://huggingface.co/deepseek-ai/Janus-Pro-7B", "filename": "", "url": "deepseek-ai/Janus-Pro-7B", "size": "14.85GB" }, { "name": "Leoxing/pia.ckpt", "type": "animatediff-pia", "base": "SD1.x", "save_path": "animatediff_models", "description": "AnimateDiff-PIA Model", "reference": "https://huggingface.co/Leoxing/PIA/tree/main", "filename": "pia.ckpt", "url": "https://huggingface.co/Leoxing/PIA/resolve/main/pia.ckpt", "size": "1.67GB" }, { "name": "comfyanonymous/cosmos_cv8x8x8_1.0.safetensors", "type": "VAE", "base": "Cosmos-1.0", "save_path": "default", "description": "VAE model for Cosmos 1.0", "reference": "https://huggingface.co/comfyanonymous/cosmos_1.0_text_encoder_and_VAE_ComfyUI/tree/main", "filename": "cosmos_cv8x8x8_1.0.safetensors", "url": "https://huggingface.co/comfyanonymous/cosmos_1.0_text_encoder_and_VAE_ComfyUI/resolve/main/vae/cosmos_cv8x8x8_1.0.safetensors", "size": "211MB" }, { "name": "mcmonkey/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "type": "diffusion_model", "base": "Cosmos-1.0", "save_path": "diffusion_models/cosmos-1.0", "description": "Cosmos 1.0 Text2World Diffusion Model (7B)", "reference": "https://huggingface.co/mcmonkey/cosmos-1.0", "filename": "Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "url": "https://huggingface.co/mcmonkey/cosmos-1.0/resolve/main/Cosmos-1_0-Diffusion-7B-Text2World.safetensors", "size": "14.5GB" }, { "name": "mcmonkey/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "type": "diffusion_model", "base": "Cosmos-1.0", "save_path": "diffusion_models/cosmos-1.0", "description": "Cosmos 1.0 Video2World Diffusion Model (7B)", "reference": "https://huggingface.co/mcmonkey/cosmos-1.0", "filename": "Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "url": 
"https://huggingface.co/mcmonkey/cosmos-1.0/resolve/main/Cosmos-1_0-Diffusion-7B-Video2World.safetensors", "size": "14.5GB" }, { "name": "mcmonkey/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "type": "diffusion_model", "base": "Cosmos-1.0", "save_path": "diffusion_models/cosmos-1.0", "description": "Cosmos 1.0 Text2World Diffusion Model (14B)", "reference": "https://huggingface.co/mcmonkey/cosmos-1.0", "filename": "Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "url": "https://huggingface.co/mcmonkey/cosmos-1.0/resolve/main/Cosmos-1_0-Diffusion-14B-Text2World.safetensors", "size": "28.5GB" }, { "name": "mcmonkey/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "type": "diffusion_model", "base": "Cosmos-1.0", "save_path": "diffusion_models/cosmos-1.0", "description": "Cosmos 1.0 Video2World Diffusion Model (14B)", "reference": "https://huggingface.co/mcmonkey/cosmos-1.0", "filename": "Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "url": "https://huggingface.co/mcmonkey/cosmos-1.0/resolve/main/Cosmos-1_0-Diffusion-14B-Video2World.safetensors", "size": "28.5GB" }, { "name": "Comfy-Org/llava_llama3_fp8_scaled.safetensors", "type": "clip", "base": "LLaVA-Llama-3", "save_path": "text_encoders", "description": "llava_llama3_fp8_scaled text encoder model. This is required for using Hunyuan Video.", "reference": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged", "filename": "llava_llama3_fp8_scaled.safetensors", "url": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/resolve/main/split_files/text_encoders/llava_llama3_fp8_scaled.safetensors", "size": "9.09GB" }, { "name": "Comfy-Org/llava_llama3_fp16.safetensors", "type": "clip", "base": "LLaVA-Llama-3", "save_path": "text_encoders", "description": "llava_llama3_fp16 text encoder model. 
This is required for using Hunyuan Video.", "reference": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged", "filename": "llava_llama3_fp16.safetensors", "url": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/resolve/main/split_files/text_encoders/llava_llama3_fp16.safetensors", "size": "16.1GB" }, { "name": "PixArt-Sigma-XL-2-512-MS.safetensors (diffusion)", "type": "diffusion_model", "base": "pixart-sigma", "save_path": "diffusion_models/PixArt-Sigma", "description": "PixArt-Sigma Diffusion model", "reference": "https://huggingface.co/PixArt-alpha/PixArt-Sigma-XL-2-512-MS", "filename": "PixArt-Sigma-XL-2-512-MS.safetensors", "url": "https://huggingface.co/PixArt-alpha/PixArt-Sigma-XL-2-512-MS/resolve/main/transformer/diffusion_pytorch_model.safetensors", "size": "2.44GB" }, { "name": "PixArt-Sigma-XL-2-1024-MS.safetensors (diffusion)", "type": "diffusion_model", "base": "pixart-sigma", "save_path": "diffusion_models/PixArt-Sigma", "description": "PixArt-Sigma Diffusion model", "reference": "https://huggingface.co/PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", "filename": "PixArt-Sigma-XL-2-1024-MS.safetensors", "url": "https://huggingface.co/PixArt-alpha/PixArt-Sigma-XL-2-1024-MS/resolve/main/transformer/diffusion_pytorch_model.safetensors", "size": "2.44GB" }, { "name": "PixArt-XL-2-1024-MS.safetensors (diffusion)", "type": "diffusion_model", "base": "pixart-alpha", "save_path": "diffusion_models/PixArt-Alpha", "description": "PixArt-Alpha Diffusion model", "reference": "https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS", "filename": "PixArt-XL-2-1024-MS.safetensors", "url": "https://huggingface.co/PixArt-alpha/PixArt-XL-2-1024-MS/resolve/main/transformer/diffusion_pytorch_model.safetensors", "size": "2.45GB" }, { "name": "Comfy-Org/hunyuan_video_t2v_720p_bf16.safetensors", "type": "diffusion_model", "base": "Hunyuan Video", "save_path": "diffusion_models/hunyuan_video", "description": "Hunyuan Video diffusion model. Repackaged version.", "reference": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged", "filename": "hunyuan_video_t2v_720p_bf16.safetensors", "url": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/resolve/main/split_files/diffusion_models/hunyuan_video_t2v_720p_bf16.safetensors", "size": "25.6GB" }, { "name": "Comfy-Org/hunyuan_video_vae_bf16.safetensors", "type": "VAE", "base": "Hunyuan Video", "save_path": "vae", "description": "Hunyuan Video VAE model. Repackaged version.", "reference": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged", "filename": "hunyuan_video_vae_bf16.safetensors", "url": "https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/resolve/main/split_files/vae/hunyuan_video_vae_bf16.safetensors", "size": "493MB" }, { "name": "LTX-Video 2B v0.9.1 Checkpoint", "type": "checkpoint", "base": "LTX-Video", "save_path": "checkpoints/LTXV", "description": "LTX-Video is the first DiT-based video generation model capable of generating high-quality videos in real-time. It produces 24 FPS videos at a 768x512 resolution faster than they can be watched. 
Trained on a large-scale dataset of diverse videos, the model generates high-resolution videos with realistic and varied content.", "reference": "https://huggingface.co/Lightricks/LTX-Video", "filename": "ltx-video-2b-v0.9.1.safetensors", "url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltx-video-2b-v0.9.1.safetensors", "size": "5.72GB" }, { "name": "XLabs-AI/flux-canny-controlnet-v3.safetensors", "type": "controlnet", "base": "FLUX.1", "save_path": "xlabs/controlnets", "description": "ControlNet checkpoints for FLUX.1-dev model by Black Forest Labs.", "reference": "https://huggingface.co/XLabs-AI/flux-controlnet-collections", "filename": "flux-canny-controlnet-v3.safetensors", "url": "https://huggingface.co/XLabs-AI/flux-controlnet-collections/resolve/main/flux-canny-controlnet-v3.safetensors", "size": "1.49GB" }, { "name": "XLabs-AI/flux-depth-controlnet-v3.safetensors", "type": "controlnet", "base": "FLUX.1", "save_path": "xlabs/controlnets", "description": "ControlNet checkpoints for FLUX.1-dev model by Black Forest Labs.", "reference": "https://huggingface.co/XLabs-AI/flux-controlnet-collections", "filename": "flux-depth-controlnet-v3.safetensors", "url": "https://huggingface.co/XLabs-AI/flux-controlnet-collections/resolve/main/flux-depth-controlnet-v3.safetensors", "size": "1.49GB" }, { "name": "XLabs-AI/flux-hed-controlnet-v3.safetensors", "type": "controlnet", "base": "FLUX.1", "save_path": "xlabs/controlnets", "description": "ControlNet checkpoints for FLUX.1-dev model by Black Forest Labs.", "reference": "https://huggingface.co/XLabs-AI/flux-controlnet-collections", "filename": "flux-hed-controlnet-v3.safetensors", "url": "https://huggingface.co/XLabs-AI/flux-controlnet-collections/resolve/main/flux-hed-controlnet-v3.safetensors", "size": "1.49GB" }, { "name": "XLabs-AI/realism_lora.safetensors", "type": "lora", "base": "FLUX.1", "save_path": "xlabs/loras", "description": "A checkpoint with trained LoRAs for FLUX.1-dev model by Black Forest Labs", "reference": "https://huggingface.co/XLabs-AI/flux-lora-collection", "filename": "realism_lora.safetensors", "url": "https://huggingface.co/XLabs-AI/flux-lora-collection/resolve/main/realism_lora.safetensors", "size": "44.8MB" }, { "name": "XLabs-AI/art_lora.safetensors", "type": "lora", "base": "FLUX.1", "save_path": "xlabs/loras", "description": "A checkpoint with trained LoRAs for FLUX.1-dev model by Black Forest Labs", "reference": "https://huggingface.co/XLabs-AI/flux-lora-collection", "filename": "art_lora.safetensors", "url": "https://huggingface.co/XLabs-AI/flux-lora-collection/resolve/main/art_lora.safetensors", "size": "44.8MB" }, { "name": "XLabs-AI/mjv6_lora.safetensors", "type": "lora", "base": "FLUX.1", "save_path": "xlabs/loras", "description": "A checkpoint with trained LoRAs for FLUX.1-dev model by Black Forest Labs", "reference": "https://huggingface.co/XLabs-AI/flux-lora-collection", "filename": "mjv6_lora.safetensors", "url": "https://huggingface.co/XLabs-AI/flux-lora-collection/resolve/main/mjv6_lora.safetensors", "size": "44.8MB" }, { "name": "XLabs-AI/flux-ip-adapter", "type": "IP-Adapter", "base": "FLUX.1", "save_path": "xlabs/ipadapters", "description": "IP-Adapter checkpoint for FLUX.1-dev model by Black Forest Labs", "reference": "https://huggingface.co/XLabs-AI/flux-ip-adapter", "filename": "ip_adapter.safetensors", "url": "https://huggingface.co/XLabs-AI/flux-ip-adapter/resolve/main/ip_adapter.safetensors", "size": "982MB" }, { "name": "stabilityai/SD3.5-Large-Controlnet-Blur", 
"type": "controlnet", "base": "SD3.5", "save_path": "controlnet/SD3.5", "description": "Blur Controlnet model for SD3.5 Large", "reference": "https://huggingface.co/stabilityai/stable-diffusion-3.5-controlnets", "filename": "sd3.5_large_controlnet_blur.safetensors", "url": "https://huggingface.co/stabilityai/stable-diffusion-3.5-controlnets/resolve/main/sd3.5_large_controlnet_blur.safetensors", "size": "8.65GB" }, { "name": "stabilityai/SD3.5-Large-Controlnet-Canny", "type": "controlnet", "base": "SD3.5", "save_path": "controlnet/SD3.5", "description": "Canny Controlnet model for SD3.5 Large", "reference": "https://huggingface.co/stabilityai/stable-diffusion-3.5-controlnets", "filename": "sd3.5_large_controlnet_canny.safetensors", "url": "https://huggingface.co/stabilityai/stable-diffusion-3.5-controlnets/resolve/main/sd3.5_large_controlnet_canny.safetensors", "size": "8.65GB" }, { "name": "stabilityai/SD3.5-Large-Controlnet-Depth", "type": "controlnet", "base": "SD3.5", "save_path": "controlnet/SD3.5", "description": "Depth Controlnet model for SD3.5 Large", "reference": "https://huggingface.co/stabilityai/stable-diffusion-3.5-controlnets", "filename": "sd3.5_large_controlnet_depth.safetensors", "url": "https://huggingface.co/stabilityai/stable-diffusion-3.5-controlnets/resolve/main/sd3.5_large_controlnet_depth.safetensors", "size": "8.65GB" }, { "name": "LTX-Video 2B v0.9 Checkpoint", "type": "checkpoint", "base": "LTX-Video", "save_path": "checkpoints/LTXV", "description": "LTX-Video is the first DiT-based video generation model capable of generating high-quality videos in real-time. It produces 24 FPS videos at a 768x512 resolution faster than they can be watched. Trained on a large-scale dataset of diverse videos, the model generates high-resolution videos with realistic and varied content.", "reference": "https://huggingface.co/Lightricks/LTX-Video", "filename": "ltx-video-2b-v0.9.safetensors", "url": "https://huggingface.co/Lightricks/LTX-Video/resolve/main/ltx-video-2b-v0.9.safetensors", "size": "9.37GB" }, { "name": "InstantX/FLUX.1-dev-IP-Adapter", "type": "IP-Adapter", "base": "FLUX.1", "save_path": "ipadapter-flux", "description": "FLUX.1-dev-IP-Adapter", "reference": "https://huggingface.co/InstantX/FLUX.1-dev-IP-Adapter", "filename": "ip-adapter.bin", "url": "https://huggingface.co/InstantX/FLUX.1-dev-IP-Adapter/resolve/main/ip-adapter.bin", "size": "5.29GB" }, { "name": "Comfy-Org/sigclip_vision_384 (patch14_384)", "type": "clip_vision", "base": "sigclip", "save_path": "clip_vision", "description": "This clip vision model is required for FLUX.1 Redux.", "reference": "https://huggingface.co/Comfy-Org/sigclip_vision_384/tree/main", "filename": "sigclip_vision_patch14_384.safetensors", "url": "https://huggingface.co/Comfy-Org/sigclip_vision_384/resolve/main/sigclip_vision_patch14_384.safetensors", "size": "857MB" }, { "name": "comfyanonymous/flux_text_encoders - t5xxl (fp16)", "type": "clip", "base": "t5", "save_path": "text_encoders/t5", "description": "Text Encoders for FLUX (fp16)", "reference": "https://huggingface.co/comfyanonymous/flux_text_encoders", "filename": "t5xxl_fp16.safetensors", "url": "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp16.safetensors", "size": "9.79GB" }, { "name": "comfyanonymous/flux_text_encoders - t5xxl (fp8_e4m3fn)", "type": "clip", "base": "t5", "save_path": "text_encoders/t5", "description": "Text Encoders for FLUX (fp8_e4m3fn)", "reference": "https://huggingface.co/comfyanonymous/flux_text_encoders", 
"filename": "t5xxl_fp8_e4m3fn.safetensors", "url": "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn.safetensors", "size": "4.89GB" }, { "name": "comfyanonymous/flux_text_encoders - t5xxl (fp8_e4m3fn_scaled)", "type": "clip", "base": "t5", "save_path": "text_encoders/t5", "description": "Text Encoders for FLUX (fp16)", "reference": "https://huggingface.co/comfyanonymous/flux_text_encoders", "filename": "t5xxl_fp8_e4m3fn_scaled.safetensors", "url": "https://huggingface.co/comfyanonymous/flux_text_encoders/resolve/main/t5xxl_fp8_e4m3fn_scaled.safetensors", "size": "5.16GB" }, { "name": "FLUX.1 [Dev] Diffusion model (scaled fp8)", "type": "diffusion_model", "base": "FLUX.1", "save_path": "diffusion_models/FLUX1", "description": "FLUX.1 [Dev] Diffusion model (scaled fp8)[w/Due to the large size of the model, it is recommended to download it through a browser if possible.]", "reference": "https://huggingface.co/comfyanonymous/flux_dev_scaled_fp8_test", "filename": "flux_dev_fp8_scaled_diffusion_model.safetensors", "url": "https://huggingface.co/comfyanonymous/flux_dev_scaled_fp8_test/resolve/main/flux_dev_fp8_scaled_diffusion_model.safetensors", "size": "11.9GB" }, { "name": "kijai/MoGe_ViT_L_fp16.safetensors", "type": "MoGe", "base": "MoGe", "save_path": "MoGe", "description": "Safetensors versions of [a/https://github.com/microsoft/MoGe](https://github.com/microsoft/MoGe)", "reference": "https://huggingface.co/Kijai/MoGe_safetensors", "filename": "MoGe_ViT_L_fp16.safetensors", "url": "https://huggingface.co/Kijai/MoGe_safetensors/resolve/main/MoGe_ViT_L_fp16.safetensors", "size": "628MB" }, { "name": "kijai/MoGe_ViT_L_fp16.safetensors", "type": "MoGe", "base": "MoGe", "save_path": "MoGe", "description": "Safetensors versions of [a/https://github.com/microsoft/MoGe](https://github.com/microsoft/MoGe)", "reference": "https://huggingface.co/Kijai/MoGe_safetensors", "filename": "MoGe_ViT_L_fp16.safetensors", "url": "https://huggingface.co/Kijai/MoGe_safetensors/resolve/main/MoGe_ViT_L_fp16.safetensors", "size": "1.26GB" }, { "name": "pulid_flux_v0.9.1.safetensors", "type": "PuLID", "base": "FLUX", "save_path": "pulid", "description": "This is required for PuLID (FLUX)", "reference": "https://huggingface.co/guozinan/PuLID", "filename": "pulid_flux_v0.9.1.safetensors", "url": "https://huggingface.co/guozinan/PuLID/resolve/main/pulid_flux_v0.9.1.safetensors", "size": "1.14GB" }, { "name": "pulid_v1.1.safetensors", "type": "PuLID", "base": "SDXL", "save_path": "pulid", "description": "This is required for PuLID (SDXL)", "reference": "https://huggingface.co/guozinan/PuLID", "filename": "pulid_v1.1.safetensors", "url": "https://huggingface.co/guozinan/PuLID/resolve/main/pulid_v1.1.safetensors", "size": "984MB" }, { "name": "Kolors-IP-Adapter-Plus.bin (Kwai-Kolors/Kolors-IP-Adapter-Plus)", "type": "IP-Adapter", "base": "Kolors", "save_path": "ipadapter", "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.", "reference": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-Plus", "filename": "Kolors-IP-Adapter-Plus.bin", "url": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-Plus/resolve/main/ip_adapter_plus_general.bin", "size": "1.01GB" }, { "name": "Kolors-IP-Adapter-FaceID-Plus.bin (Kwai-Kolors/Kolors-IP-Adapter-Plus)", "type": "IP-Adapter", "base": "Kolors", "save_path": "ipadapter", "description": "You can use this model in the [a/ComfyUI IPAdapter 
plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.", "reference": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus", "filename": "Kolors-IP-Adapter-FaceID-Plus.bin", "url": "https://huggingface.co/Kwai-Kolors/Kolors-IP-Adapter-FaceID-Plus/resolve/main/ipa-faceid-plus.bin", "size": "2.39GB" } ] }