Revert "Disable pytorch attention in VAE for AMD."

Big sizes crash even without pytorch attention, and for reasonable
sizes pytorch attention is significantly faster.

This reverts commit 1cd6cd6080.
Kacper Michajłow 2025-05-26 14:13:08 +02:00
parent 560b1bdfca
commit 9519e2d49d
2 changed files with 1 addition and 6 deletions


@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled_vae():
+    elif model_management.pytorch_attention_enabled():
        logging.info("Using pytorch attention in VAE")
        return pytorch_attention
    else:
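For context, the pytorch_attention backend selected here applies
torch.nn.functional.scaled_dot_product_attention over the VAE's flattened
spatial positions. A minimal sketch, assuming (B, C, H, W) feature maps and
a single attention head; illustrative only, not the repository's exact
implementation:

import torch
import torch.nn.functional as F

def pytorch_attention_sketch(q, k, v):
    # q, k, v: (B, C, H, W) feature maps from a VAE attention block.
    b, c, h, w = q.shape
    # Flatten spatial positions into a sequence: (B, 1, H*W, C).
    q, k, v = (t.reshape(b, 1, c, h * w).transpose(2, 3) for t in (q, k, v))
    # Fused scaled dot-product attention; PyTorch picks the kernel.
    out = F.scaled_dot_product_attention(q, k, v)
    # Restore the (B, C, H, W) layout.
    return out.transpose(2, 3).reshape(b, c, h, w)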


@@ -1114,11 +1114,6 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION
 
-def pytorch_attention_enabled_vae():
-    if is_amd():
-        return False # enabling pytorch attention on AMD currently causes crash when doing high res
-    return pytorch_attention_enabled()
-
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
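The pytorch_attention_flash_attention() function in the trailing context
reports whether PyTorch's flash kernel can be used. One way to probe that
with PyTorch's public API (a sketch, not this repository's code; the
sdp_kernel context manager is deprecated in newer PyTorch in favor of
torch.nn.attention.sdpa_kernel):

import torch
import torch.nn.functional as F

q = k = v = torch.randn(1, 1, 64, 64, device="cuda", dtype=torch.float16)

# Allow only the flash kernel; SDPA raises if it cannot serve these inputs.
with torch.backends.cuda.sdp_kernel(enable_flash=True,
                                    enable_math=False,
                                    enable_mem_efficient=False):
    try:
        F.scaled_dot_product_attention(q, k, v)
        print("flash attention kernel available")
    except RuntimeError:
        print("flash attention kernel unavailable for these inputs")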