diff --git a/comfy/customzluda/zluda.py b/comfy/customzluda/zluda.py
index c11aea01c..c762fe2ec 100644
--- a/comfy/customzluda/zluda.py
+++ b/comfy/customzluda/zluda.py
@@ -676,13 +676,12 @@ def do_hijack():
     print(" :: Configuring PyTorch backends...")
     torch.backends.cuda.enable_mem_efficient_sdp(False)
     torch.backends.cuda.enable_mem_efficient_sdp = do_nothing
-    torch.backends.cudnn.enabled = False
-    torch.backends.cudnn.benchmark = False
     torch.backends.cudnn.enabled = os.environ.get("TORCH_BACKENDS_CUDNN_ENABLED", "1").strip().lower() not in {"0", "off", "false", "disable", "disabled", "no"}
     if torch.backends.cudnn.enabled:
         print(" :: Enabled cuDNN")
     else:
         print(" :: Disabled cuDNN")
+    torch.backends.cudnn.benchmark = False
     if hasattr(torch.backends.cuda, "enable_flash_sdp"):
         torch.backends.cuda.enable_flash_sdp(True)
         print(" :: Disabled CUDA flash attention")