mirror of https://github.com/comfyanonymous/ComfyUI.git
Add function to unload all models from RAM and GPU
This function unloads all models from RAM and GPU, freeing resources when a user cancels. It handles exceptions during the unload process and logs the outcome.
commit 6f88e549ff
parent 52e778fff3
@@ -1524,3 +1524,28 @@ def throw_exception_if_processing_interrupted():
    if interrupt_processing:
        interrupt_processing = False
        raise InterruptProcessingException()

def unload_all_models_full():
    """
    Completely unloads all models from RAM and GPU when the user cancels.
    Frees CPU RAM, GPU VRAM, and clears python references.
    """
    global current_loaded_models
    try:
        # Unload every model object
        for m in current_loaded_models:
            try:
                m.model_unload(memory_to_free=None, unpatch_weights=True)
            except:
                pass
        current_loaded_models.clear()

        # Force Python GC
        gc.collect()

        # Clear GPU memory
        soft_empty_cache(force=True)

        logging.info("All models unloaded successfully (manual full unload).")
    except Exception as e:
        logging.warning(f"Model unload warning: {e}")
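A minimal usage sketch of the new function from a cancellation path, assuming it is importable from comfy.model_management alongside the interrupt helpers shown in the hunk above; the handle_cancel wrapper and the VRAM logging are illustrative additions, not part of the commit.

import logging

import torch

import comfy.model_management as mm


def handle_cancel():
    """Hypothetical cancel hook: fully release model memory when the user cancels."""
    if torch.cuda.is_available():
        before_mib = torch.cuda.memory_allocated() / (1024 ** 2)
        logging.info(f"VRAM allocated before unload: {before_mib:.1f} MiB")

    # Added by this commit: unloads every loaded model, runs gc.collect(),
    # and empties the GPU cache via soft_empty_cache(force=True).
    mm.unload_all_models_full()

    if torch.cuda.is_available():
        after_mib = torch.cuda.memory_allocated() / (1024 ** 2)
        logging.info(f"VRAM allocated after unload: {after_mib:.1f} MiB")

Because per-model failures are swallowed inside the loop and the whole body is wrapped in try/except, calling the function from a cancel or error path does not raise; the outcome is reported through logging rather than a return value.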