mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-26 22:30:19 +08:00

had to move cudnn disablement up much higher

parent cd3d60254b
commit 1b9a3b12c2
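For context, a minimal standalone sketch of the toggle this commit hoists (only os and torch are assumed; the surrounding module layout is illustrative, not the fork's actual file):

import os
import torch

# Any of these spellings in TORCH_BACKENDS_CUDNN_ENABLED turns cuDNN off.
_FALSY = {"0", "off", "false", "disable", "disabled", "no"}

# Run this before any code builds a convolution or attention op, so nothing
# gets a chance to pick a cuDNN code path first -- the point of moving the
# block "up much higher" in the commit below.
torch.backends.cudnn.enabled = (
    os.environ.get("TORCH_BACKENDS_CUDNN_ENABLED", "1").strip().lower() not in _FALSY
)
if torch.backends.cudnn.enabled:
    print(" :: Enabled cuDNN")
else:
    print(" :: Disabled cuDNN")
    # benchmark mode is meaningless without cuDNN, so switch it off too
    torch.backends.cudnn.benchmark = False

In practice the variable is set before launch, e.g. TORCH_BACKENDS_CUDNN_ENABLED=0 python main.py (assuming the stock ComfyUI entry point).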
@@ -431,6 +431,14 @@ try:
     import triton.language as tl
     print(" :: Triton core imported successfully")

+    # This needs to be up here, so it can disable cudnn before anything can even think about using it
+    torch.backends.cudnn.enabled = os.environ.get("TORCH_BACKENDS_CUDNN_ENABLED", "1").strip().lower() not in {"0", "off", "false", "disable", "disabled", "no"}
+    if torch.backends.cudnn.enabled:
+        print(" :: Enabled cuDNN")
+    else:
+        print(" :: Disabled cuDNN")
+        torch.backends.cudnn.benchmark = False
+
     @triton.jit
     def _zluda_kernel_test(x_ptr, y_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
         pid = tl.program_id(axis=0)
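A quick way to confirm the override took effect in a running process (both attributes are standard PyTorch; the session shown is illustrative):

import torch

print(torch.backends.cudnn.is_available())  # was a cuDNN library found at all?
print(torch.backends.cudnn.enabled)         # reflects the env-var override above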
@@ -676,12 +684,6 @@ def do_hijack():
     print(" :: Configuring PyTorch backends...")
     torch.backends.cuda.enable_mem_efficient_sdp(False)
     torch.backends.cuda.enable_mem_efficient_sdp = do_nothing
-    torch.backends.cudnn.enabled = os.environ.get("TORCH_BACKENDS_CUDNN_ENABLED", "1").strip().lower() not in {"0", "off", "false", "disable", "disabled", "no"}
-    if torch.backends.cudnn.enabled:
-        print(" :: Enabled cuDNN")
-    else:
-        print(" :: Disabled cuDNN")
-        torch.backends.cudnn.benchmark = False
     if hasattr(torch.backends.cuda, "enable_flash_sdp"):
         torch.backends.cuda.enable_flash_sdp(True)
         print(" :: Disabled CUDA flash attention")
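The lines kept in do_hijack() follow the usual guarded-toggle pattern; a minimal runnable sketch (the PyTorch calls are real, the wrapper function is hypothetical):

import torch

def configure_sdp_backends():
    # As in the kept hunk: turn the memory-efficient SDP kernel off and,
    # where this torch build exposes it, turn flash attention on.
    torch.backends.cuda.enable_mem_efficient_sdp(False)
    if hasattr(torch.backends.cuda, "enable_flash_sdp"):
        torch.backends.cuda.enable_flash_sdp(True)

configure_sdp_backends()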