update DB

Dr.Lt.Data 2024-08-17 16:26:22 +09:00
parent 31e300e4e8
commit 7538169251
2 changed files with 2 additions and 2 deletions

@@ -654,7 +654,7 @@
         "https://github.com/city96/ComfyUI-GGUF"
     ],
     "install_type": "git-clone",
-    "description": "GGUF Quantization support for native ComfyUI models\nThis is currently very much WIP. These custom nodes provide support for model files stored in the GGUF format popularized by llama.cpp.\nWhile quantization wasn't feasible for regular UNET models (conv2d), transformer/DiT models such as flux seem less affected by quantization. This allows running it in much lower bits per weight variable bitrate quants on low-end GPUs.\n[w/LoRA / Controlnet / etc are currently not supported due to the weights being quantized.]"
+    "description": "GGUF Quantization support for native ComfyUI models\nThis is currently very much WIP. These custom nodes provide support for model files stored in the GGUF format popularized by llama.cpp.\nWhile quantization wasn't feasible for regular UNET models (conv2d), transformer/DiT models such as flux seem less affected by quantization. This allows running it in much lower bits per weight variable bitrate quants on low-end GPUs."
 },
 {
     "author": "SLAPaper",

@@ -177,7 +177,7 @@
         "https://github.com/city96/ComfyUI-GGUF"
     ],
     "install_type": "git-clone",
-    "description": "GGUF Quantization support for native ComfyUI models\nThis is currently very much WIP. These custom nodes provide support for model files stored in the GGUF format popularized by llama.cpp.\nWhile quantization wasn't feasible for regular UNET models (conv2d), transformer/DiT models such as flux seem less affected by quantization. This allows running it in much lower bits per weight variable bitrate quants on low-end GPUs.\n[w/LoRA / Controlnet / etc are currently not supported due to the weights being quantized.]"
+    "description": "GGUF Quantization support for native ComfyUI models\nThis is currently very much WIP. These custom nodes provide support for model files stored in the GGUF format popularized by llama.cpp.\nWhile quantization wasn't feasible for regular UNET models (conv2d), transformer/DiT models such as flux seem less affected by quantization. This allows running it in much lower bits per weight variable bitrate quants on low-end GPUs."
 },
 {
     "author": "smthemex",