Update logging when models are loaded

The "Loaded" log message was emitted even if no models were actually loaded into VRAM.
This commit is contained in:
Rafał Leszko 2025-02-04 14:44:12 +01:00 committed by GitHub
parent 1488f2c59b
commit 0d15a091c2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -551,7 +551,6 @@ def load_models_gpu(models: Sequence[ModelManageable], memory_required: int = 0,
    _load_models_gpu(models, memory_required, force_patch_weights, minimum_memory_required, force_full_load)
    to_load = list(map(str, models))
    span.set_attribute("models", to_load)
logger.info(f"Loaded {to_load}")
def _load_models_gpu(models: Sequence[ModelManageable], memory_required: int = 0, force_patch_weights=False, minimum_memory_required=None, force_full_load=False) -> None:
@ -627,6 +626,7 @@ def _load_models_gpu(models: Sequence[ModelManageable], memory_required: int = 0
        loaded_model.model_load(lowvram_model_memory, force_patch_weights=force_patch_weights)
        current_loaded_models.insert(0, loaded_model)
logger.info(f"Loaded {loaded_model}")
    span = get_current_span()
    span.set_attribute("models_to_load", list(map(str, models_to_load)))