From 7ed9292532cb88ca6a0689afcf2e6e8af49489f2 Mon Sep 17 00:00:00 2001
From: doctorpangloss <2229300+doctorpangloss@users.noreply.github.com>
Date: Tue, 21 Oct 2025 14:27:16 -0700
Subject: [PATCH] fix gguf logs to debug

---
 comfy/gguf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/gguf.py b/comfy/gguf.py
index 1bf0eb9b5..ec6ac77a7 100644
--- a/comfy/gguf.py
+++ b/comfy/gguf.py
@@ -777,7 +777,7 @@ def gguf_sd_loader(path, handle_prefix="model.diffusion_model.", return_arch=Fal
         qtype_dict[tensor_type_str] = qtype_dict.get(tensor_type_str, 0) + 1
 
     # print loaded tensor type counts
-    logger.info("gguf qtypes: " + ", ".join(f"{k} ({v})" for k, v in qtype_dict.items()))
+    logger.debug("gguf qtypes: " + ", ".join(f"{k} ({v})" for k, v in qtype_dict.items()))
 
     # mark largest tensor for vram estimation
     qsd = {k: v for k, v in state_dict.items() if is_quantized(v)}
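
Note for reviewers: the patch only demotes the log level, so the per-qtype summary is still available by raising the module's logger to DEBUG. A minimal sketch of opting back in, assuming the module obtains its logger via logging.getLogger(__name__) so the logger name is "comfy.gguf"; that name is an assumption, not something this patch confirms:

    import logging

    # Typical app-level config: INFO and above are shown, so the
    # "gguf qtypes: ..." line is hidden by default after this change.
    logging.basicConfig(level=logging.INFO)

    # Re-enable the per-qtype tensor counts when debugging a GGUF load.
    # "comfy.gguf" is an assumed logger name derived from the module path.
    logging.getLogger("comfy.gguf").setLevel(logging.DEBUG)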