Print memory summary on OOM to help with debugging. (#11613)

comfyanonymous 2026-01-03 19:28:38 -08:00 committed by GitHub
parent 9a552df898
commit 53e762a3af
2 changed files with 5 additions and 0 deletions

comfy/model_management.py

@@ -1542,6 +1542,10 @@ def soft_empty_cache(force=False):
 def unload_all_models():
     free_memory(1e30, get_torch_device())
 
+def debug_memory_summary():
+    if is_amd() or is_nvidia():
+        return torch.cuda.memory.memory_summary()
+    return ""
 
 #TODO: might be cleaner to put this somewhere else
 import threading
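
For context, here is a minimal standalone sketch of what the new helper does and how it gets used on an OOM. This is an illustration, not the repository's code: the oversized torch.empty() allocation is a contrived way to trigger an OOM, and torch.cuda.is_available() stands in for the repository's own is_amd()/is_nvidia() backend checks (ROCm builds of PyTorch also report through the torch.cuda namespace, so a single availability check is a reasonable approximation here).

import logging
import torch

logging.basicConfig(level=logging.INFO)

def debug_memory_summary():
    # torch.cuda.memory.memory_summary() returns a human-readable table of
    # the caching allocator's current and peak usage for the device.
    # torch.cuda.is_available() approximates is_amd()/is_nvidia() (assumption).
    if torch.cuda.is_available():
        return torch.cuda.memory.memory_summary()
    return ""

try:
    # Deliberately oversized allocation to force an OOM (illustration only).
    oversized = torch.empty((1 << 40,), device="cuda")
except torch.cuda.OutOfMemoryError:
    logging.info("Memory summary: {}".format(debug_memory_summary()))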

execution.py

@@ -601,6 +601,7 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
         if isinstance(ex, comfy.model_management.OOM_EXCEPTION):
             tips = "This error means you ran out of memory on your GPU.\n\nTIPS: If the workflow worked before you might have accidentally set the batch_size to a large number."
+            logging.info("Memory summary: {}".format(comfy.model_management.debug_memory_summary()))
             logging.error("Got an OOM, unloading all loaded models.")
             comfy.model_management.unload_all_models()
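
Note the ordering in the handler: the summary is logged before unload_all_models() runs, so the INFO-level log captures the allocator state at the moment of the OOM rather than after the models have been freed. On backends where debug_memory_summary() returns an empty string (neither AMD nor NVIDIA), the log line is still emitted but carries no table.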