commit 58587a4fd4
Vijaysinh, 2025-12-14 11:02:46 +01:00, committed by GitHub
5 changed files with 80 additions and 7 deletions

comfy/extra_config.json (new file, +3)

@@ -0,0 +1,3 @@
+{
+    "unload_text_encoder_after_run": true
+}
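Note: this commit adds the flag but does not show where it is read. A minimal sketch of how a loader could consume it, assuming the file is read from comfy/extra_config.json relative to the ComfyUI root; the helper name load_extra_config is illustrative, not part of this commit:

    import json
    import os

    def load_extra_config(path="comfy/extra_config.json"):
        # Treat the config as optional: a missing or malformed file means defaults.
        if not os.path.isfile(path):
            return {}
        try:
            with open(path, "r", encoding="utf-8") as f:
                return json.load(f)
        except (OSError, json.JSONDecodeError):
            return {}

    UNLOAD_TE_AFTER_RUN = load_extra_config().get("unload_text_encoder_after_run", False)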

comfy/model_management.py

@@ -15,7 +15,8 @@
 You should have received a copy of the GNU General Public License
 along with this program. If not, see <https://www.gnu.org/licenses/>.
 """
+import gc
 import torch
 import psutil
 import logging
 from enum import Enum
@@ -1085,10 +1086,9 @@ def cast_to(weight, dtype=None, device=None, non_blocking=False, copy=False, stream=None):
     if dtype is None or weight.dtype == dtype:
         return weight
     if stream is not None:
-        wf_context = stream
-        if hasattr(wf_context, "as_context"):
-            wf_context = wf_context.as_context(stream)
-        with wf_context:
+        if not hasattr(stream, "__enter__"):
+            logging.error(f"Stream object {stream} of type {type(stream)} does not have __enter__ method")
+        with stream:
             return weight.to(dtype=dtype, copy=copy)
     return weight.to(dtype=dtype, copy=copy)
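Note: as committed, the branch logs an error when stream lacks __enter__ but then enters `with stream:` anyway, which raises AttributeError on exactly those builds. On PyTorch versions where torch.cuda.Stream is not itself a context manager, the portable spelling is the torch.cuda.stream() wrapper. A hedged sketch of a fallback; the helper name stream_ctx is illustrative:

    import contextlib
    import torch

    def stream_ctx(stream):
        # Use the stream directly when it supports the context-manager protocol.
        if hasattr(stream, "__enter__"):
            return stream
        # torch.cuda.stream() wraps a Stream in a StreamContext, which always does.
        if isinstance(stream, torch.cuda.Stream):
            return torch.cuda.stream(stream)
        # Unknown object: fall back to a no-op context.
        return contextlib.nullcontext()

Usage inside cast_to would then be `with stream_ctx(stream): return weight.to(dtype=dtype, copy=copy)`.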
@@ -1552,3 +1552,21 @@ def throw_exception_if_processing_interrupted():
     if interrupt_processing:
         interrupt_processing = False
         raise InterruptProcessingException()
+
+def cleanup_ram():
+    gc.collect()
+    try:
+        torch.cuda.empty_cache()
+    except Exception:
+        pass
+
+def unload_text_encoder(encoder):
+    if encoder is None:
+        return
+    try:
+        if hasattr(encoder, "model"):
+            del encoder.model
+        del encoder
+    except Exception:
+        pass
+    cleanup_ram()
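Note: `del encoder` inside the helper only drops the function-local reference; the object is reclaimed once the caller releases its own reference and gc.collect() in cleanup_ram() runs. A usage sketch under that assumption; tokenize/encode_from_tokens follow ComfyUI's usual CLIP-style API, and encode_then_release is illustrative:

    import comfy.model_management as mm

    def encode_then_release(clip_object, text):
        # Encode once, then ask model_management to free the encoder.
        tokens = clip_object.tokenize(text)
        cond = clip_object.encode_from_tokens(tokens)
        mm.unload_text_encoder(clip_object)
        # The caller must also drop clip_object for memory to actually return.
        return cond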

execution.py

@@ -736,6 +736,31 @@ class PromptExecutor:
             "outputs": ui_outputs,
             "meta": meta_outputs,
         }
+        # --- BEGIN: Text Encoder RAM Cleanup Patch ---
+        try:
+            import comfy.model_management as mm
+            # If ComfyUI exposes loaded text encoders (most builds do)
+            if hasattr(mm, "loaded_text_encoders"):
+                for enc in list(mm.loaded_text_encoders.values()):
+                    try:
+                        mm.unload_text_encoder(enc)
+                    except Exception:
+                        pass
+                mm.loaded_text_encoders.clear()
+            # Final RAM + VRAM cleanup
+            try:
+                mm.cleanup_models_gc()
+            except Exception:
+                pass
+            print("[RAM Optimizer] Text encoders unloaded successfully after run.")
+        except Exception as e:
+            print(f"[RAM Optimizer] Failed to unload text encoders: {e}")
+        # --- END: Text Encoder RAM Cleanup Patch ---
         self.server.last_node_id = None
         if comfy.model_management.DISABLE_SMART_MEMORY:
             comfy.model_management.unload_all_models()
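Note: the cleanup only fires if something populates mm.loaded_text_encoders, and this commit does not add that registry, so on builds without the attribute the hasattr guard turns the whole block into a no-op. A hypothetical registration hook (all names here are illustrative, not ComfyUI API):

    import comfy.model_management as mm

    if not hasattr(mm, "loaded_text_encoders"):
        mm.loaded_text_encoders = {}  # id(encoder) -> encoder

    def register_text_encoder(encoder):
        # A loader node would call this right after constructing the encoder.
        mm.loaded_text_encoders[id(encoder)] = encoder
        return encoder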

nodes.py

@@ -2247,7 +2247,7 @@ async def init_external_custom_nodes():
             logging.info(f"Skipping {possible_module} due to disable_all_custom_nodes and whitelist_custom_nodes")
             continue
-        if args.enable_manager:
+        if getattr(args, "enable_manager", False):
             if comfyui_manager.should_be_disabled(module_path):
                 logging.info(f"Blocked by policy: {module_path}")
                 continue
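Note: the getattr form guards against argument namespaces built without the flag, e.g. by embedders or older argument parsers, where a plain attribute access raises. A quick illustration with types.SimpleNamespace:

    from types import SimpleNamespace

    args_old = SimpleNamespace()                     # no enable_manager attribute
    args_new = SimpleNamespace(enable_manager=True)

    # args_old.enable_manager would raise AttributeError;
    # getattr with a default degrades gracefully instead.
    assert getattr(args_old, "enable_manager", False) is False
    assert getattr(args_new, "enable_manager", False) is True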

reproduce_stream_error.py (new file, +27)

@@ -0,0 +1,27 @@
+import torch
+import logging
+
+logging.basicConfig(level=logging.INFO)
+
+
+def test_stream():
+    if not torch.cuda.is_available():
+        print("CUDA not available, cannot test cuda stream")
+        return
+    device = torch.device("cuda")
+    stream = torch.cuda.Stream(device=device, priority=0)
+    print(f"Stream type: {type(stream)}")
+    print(f"Has __enter__: {hasattr(stream, '__enter__')}")
+    try:
+        with stream:
+            print("Stream context manager works")
+    except AttributeError as e:
+        print(f"AttributeError caught: {e}")
+    except Exception as e:
+        print(f"Other exception caught: {e}")
+
+
+if __name__ == "__main__":
+    test_stream()
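Note: on PyTorch builds where torch.cuda.Stream implements the context-manager protocol, the script prints "Stream context manager works"; on older builds the AttributeError branch fires. A hedged extension showing the wrapper form succeeding either way; test_stream_portable is illustrative, not part of the commit:

    def test_stream_portable():
        if not torch.cuda.is_available():
            return
        stream = torch.cuda.Stream()
        # torch.cuda.stream() returns a StreamContext, which supports `with`
        # regardless of whether the Stream object itself does.
        with torch.cuda.stream(stream):
            print("torch.cuda.stream() wrapper works")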