From 2c5b9da6c47f0eab24f59cf4bed6de02ee797f75 Mon Sep 17 00:00:00 2001
From: strint
Date: Fri, 12 Dec 2025 17:50:35 +0800
Subject: [PATCH] rm debug log

---
 comfy/model_base.py       | 10 +---------
 comfy/model_management.py | 19 +------------------
 comfy/sd.py               |  1 -
 comfy/utils.py            |  3 ---
 4 files changed, 2 insertions(+), 31 deletions(-)

diff --git a/comfy/model_base.py b/comfy/model_base.py
index c3c32810b..6b8a8454d 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -61,7 +61,6 @@ import math
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
     from comfy.model_patcher import ModelPatcher
-from comfy.model_management import get_free_memory
 
 class ModelType(Enum):
     EPS = 1
@@ -305,15 +304,8 @@ class BaseModel(torch.nn.Module):
             if k.startswith(unet_prefix):
                 to_load[k[len(unet_prefix):]] = sd.pop(k)
 
-        free_cpu_memory = get_free_memory(torch.device("cpu"))
-        logging.debug(f"load model weights start, free cpu memory size {free_cpu_memory/(1024*1024*1024)} GB")
-        logging.debug(f"model destination device {next(self.diffusion_model.parameters()).device}")
         to_load = self.model_config.process_unet_state_dict(to_load)
-        logging.debug(f"load model {self.model_config} weights process end")
-        # replace tensor with mmap tensor by assign
-        m, u = self.diffusion_model.load_state_dict(to_load, strict=False, assign=True)
-        free_cpu_memory = get_free_memory(torch.device("cpu"))
-        logging.debug(f"load model {self.model_config} weights end, free cpu memory size {free_cpu_memory/(1024*1024*1024)} GB")
+        m, u = self.diffusion_model.load_state_dict(to_load, strict=False)
         if len(m) > 0:
             logging.warning("unet missing: {}".format(m))
 
diff --git a/comfy/model_management.py b/comfy/model_management.py
index d598a854c..5105111c6 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -533,18 +533,11 @@ class LoadedModel:
         return False
 
     def model_unload(self, memory_to_free=None, unpatch_weights=True):
-        logging.debug(f"model_unload: {self.model.model.__class__.__name__}")
-        logging.debug(f"memory_to_free: {memory_to_free/(1024*1024*1024)} GB")
-        logging.debug(f"unpatch_weights: {unpatch_weights}")
-        logging.debug(f"loaded_size: {self.model.loaded_size()/(1024*1024*1024)} GB")
-        logging.debug(f"offload_device: {self.model.offload_device}")
-
         if memory_to_free is None:
             # free the full model
             memory_to_free = self.model.loaded_size()
 
         available_memory = get_free_memory(self.model.offload_device)
-        logging.debug(f"before unload, available_memory of offload device {self.model.offload_device}: {available_memory/(1024*1024*1024)} GB")
         mmap_mem_threshold = get_mmap_mem_threshold_gb() * 1024 * 1024 * 1024
         # this is reserved memory for other system usage
         if memory_to_free > available_memory - mmap_mem_threshold or memory_to_free < self.model.loaded_size():
@@ -553,22 +546,15 @@ class LoadedModel:
             partially_unload = False
 
         if partially_unload:
-            logging.debug("Do partially unload")
             freed = self.model.partially_unload(self.model.offload_device, memory_to_free)
-            logging.debug(f"partially_unload freed vram: {freed/(1024*1024*1024)} GB")
             if freed < memory_to_free:
-                logging.warning(f"Partially unload not enough memory, freed {freed/(1024*1024*1024)} GB, memory_to_free {memory_to_free/(1024*1024*1024)} GB")
+                logging.debug(f"Partially unload not enough memory, freed {freed/(1024*1024*1024)} GB, memory_to_free {memory_to_free/(1024*1024*1024)} GB")
         else:
-            logging.debug("Do full unload")
             self.model.detach(unpatch_weights)
-            logging.debug("Do full unload done")
 
         self.model_finalizer.detach()
         self.model_finalizer = None
         self.real_model = None
-        available_memory = get_free_memory(self.model.offload_device)
-        logging.debug(f"after unload, available_memory of offload device {self.model.offload_device}: {available_memory/(1024*1024*1024)} GB")
-
         if partially_unload:
             return False
         else:
@@ -622,7 +608,6 @@ def minimum_inference_memory():
     return (1024 * 1024 * 1024) * 0.8 + extra_reserved_memory()
 
 def free_memory(memory_required, device, keep_loaded=[]):
-    logging.debug("start to free mem")
     cleanup_models_gc()
     unloaded_model = []
     can_unload = []
@@ -660,7 +645,6 @@ def free_memory(memory_required, device, keep_loaded=[]):
     return unloaded_models
 
 def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimum_memory_required=None, force_full_load=False):
-    logging.debug(f"start to load models")
     cleanup_models_gc()
     global vram_state
 
@@ -682,7 +666,6 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
     models_to_load = []
 
     for x in models:
-        logging.debug(f"start loading model to vram: {x.model.__class__.__name__}")
         loaded_model = LoadedModel(x)
         try:
             loaded_model_index = current_loaded_models.index(loaded_model)
diff --git a/comfy/sd.py b/comfy/sd.py
index 7c00337a6..754b1703d 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -1466,7 +1466,6 @@ def load_diffusion_model_state_dict(sd, model_options={}, metadata=None):
             logging.warning("{} {}".format(diffusers_keys[k], k))
 
     offload_device = model_management.unet_offload_device()
-    logging.debug(f"loader load model to offload device: {offload_device}")
     unet_weight_dtype = list(model_config.supported_inference_dtypes)
     if model_config.quant_config is not None:
         weight_dtype = None
diff --git a/comfy/utils.py b/comfy/utils.py
index 8dc33a411..89846bc95 100644
--- a/comfy/utils.py
+++ b/comfy/utils.py
@@ -61,8 +61,6 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
     metadata = None
     if ckpt.lower().endswith(".safetensors") or ckpt.lower().endswith(".sft"):
         try:
-            if not DISABLE_MMAP:
-                logging.debug(f"load_torch_file of safetensors into mmap True")
             with safetensors.safe_open(ckpt, framework="pt", device=device.type) as f:
                 sd = {}
                 for k in f.keys():
@@ -83,7 +81,6 @@ def load_torch_file(ckpt, safe_load=False, device=None, return_metadata=False):
         else:
             torch_args = {}
             if MMAP_TORCH_FILES:
-                logging.debug(f"load_torch_file of torch state dict into mmap True")
                 torch_args["mmap"] = True
 
         if safe_load or ALWAYS_SAFE_LOAD: