From f125a37bdfe523dda7ec9afd123b18366ef58a17 Mon Sep 17 00:00:00 2001
From: patientx
Date: Fri, 14 Feb 2025 12:33:27 +0300
Subject: [PATCH] Update model_management.py

---
 comfy/model_management.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index dd8a2a28f..0f8f688e7 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -25,6 +25,7 @@ import sys
 import platform
 import weakref
 import gc
+import zluda
 
 class VRAMState(Enum):
     DISABLED = 0    #No vram present: no need to move models to vram
@@ -249,10 +250,10 @@
 except:
     pass
 
-if ENABLE_PYTORCH_ATTENTION:
-    torch.backends.cuda.enable_math_sdp(True)
-    torch.backends.cuda.enable_flash_sdp(True)
-    torch.backends.cuda.enable_mem_efficient_sdp(True)
+# if ENABLE_PYTORCH_ATTENTION:
+    # torch.backends.cuda.enable_math_sdp(True)
+    # torch.backends.cuda.enable_flash_sdp(True)
+    # torch.backends.cuda.enable_mem_efficient_sdp(True)
 
 try:
     if is_nvidia() and args.fast:
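---
Review note: the patch imports a local zluda helper module and comments out
the PyTorch scaled-dot-product-attention backend toggles unconditionally,
presumably because the flash and memory-efficient SDPA kernels are not
usable under ZLUDA's CUDA translation layer. A minimal sketch of an
alternative that keeps the toggles active on native CUDA and skips them
only when ZLUDA is detected is shown below. The is_zluda() helper and the
"[ZLUDA]" device-name marker are assumptions for illustration, not part of
the patched file; ENABLE_PYTORCH_ATTENTION is a stand-in for the flag that
model_management.py computes earlier from the command-line arguments.

import torch

ENABLE_PYTORCH_ATTENTION = True  # stand-in for the flag computed earlier

def is_zluda() -> bool:
    # Hypothetical detection: ZLUDA builds commonly append "[ZLUDA]" to the
    # reported CUDA device name. Treat any failure to query the device as
    # "not ZLUDA" so native CUDA setups keep the default behavior.
    try:
        return torch.cuda.is_available() and "[ZLUDA]" in torch.cuda.get_device_name(0)
    except Exception:
        return False

if ENABLE_PYTORCH_ATTENTION and not is_zluda():
    # Enable all three SDPA backends only on native CUDA devices.
    torch.backends.cuda.enable_math_sdp(True)
    torch.backends.cuda.enable_flash_sdp(True)
    torch.backends.cuda.enable_mem_efficient_sdp(True)

A runtime check like this would let the same file serve both native CUDA
and ZLUDA installs, instead of maintaining a fork with the lines commented
out.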