fixes to use pytorch-attention

commit bce4176d3d
parent f9ee02080f
Author: patientx
Date:   2025-02-13 19:17:35 +03:00 (committed by GitHub)

@@ -237,10 +237,10 @@ try:
 except:
     pass
 
-if ENABLE_PYTORCH_ATTENTION:
-    torch.backends.cuda.enable_math_sdp(True)
-    torch.backends.cuda.enable_flash_sdp(True)
-    torch.backends.cuda.enable_mem_efficient_sdp(True)
+#if ENABLE_PYTORCH_ATTENTION:
+#    torch.backends.cuda.enable_math_sdp(True)
+#    torch.backends.cuda.enable_flash_sdp(True)
+#    torch.backends.cuda.enable_mem_efficient_sdp(True)
 
 try:
     if is_nvidia() and args.fast:
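
For context, the three flags toggled in this hunk control which CUDA kernels torch.nn.functional.scaled_dot_product_attention is allowed to dispatch to. The sketch below is not part of the commit and the tensor shapes are illustrative assumptions; it contrasts PyTorch's default backend selection (the effect of leaving the block commented out) with forcing all three backends on, as the removed lines did.

# Sketch (not from the commit): what the toggled flags control.
# torch.backends.cuda.enable_*_sdp() gate which CUDA kernels
# torch.nn.functional.scaled_dot_product_attention may dispatch to.
import torch
import torch.nn.functional as F

# Illustrative shapes: (batch, heads, sequence length, head dim).
q = torch.randn(1, 8, 256, 64)
k = torch.randn(1, 8, 256, 64)
v = torch.randn(1, 8, 256, 64)

# With the block commented out (the new state), PyTorch keeps its
# default backend selection and picks any kernel it supports.
out = F.scaled_dot_product_attention(q, k, v)

# The removed block instead forced all three backends on explicitly:
torch.backends.cuda.enable_math_sdp(True)           # portable math fallback
torch.backends.cuda.enable_flash_sdp(True)          # FlashAttention kernel
torch.backends.cuda.enable_mem_efficient_sdp(True)  # memory-efficient kernel
out = F.scaled_dot_product_attention(q, k, v)

On a setup where the flash or memory-efficient kernels cannot actually run, explicitly enabling them can break attention, so falling back to PyTorch's defaults is a plausible reading of the "fixes to use pytorch-attention" commit title.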