diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py
index 4245eedca..a2f984b00 100644
--- a/comfy/ldm/modules/diffusionmodules/model.py
+++ b/comfy/ldm/modules/diffusionmodules/model.py
@@ -297,7 +297,7 @@ def vae_attention():
     if model_management.xformers_enabled_vae():
         logging.info("Using xformers attention in VAE")
         return xformers_attention
-    elif model_management.pytorch_attention_enabled_vae():
+    elif model_management.pytorch_attention_enabled():
         logging.info("Using pytorch attention in VAE")
         return pytorch_attention
     else:
diff --git a/comfy/model_management.py b/comfy/model_management.py
index a2c318ec3..c6b2c3ac8 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1114,11 +1114,6 @@ def pytorch_attention_enabled():
     global ENABLE_PYTORCH_ATTENTION
     return ENABLE_PYTORCH_ATTENTION
 
-def pytorch_attention_enabled_vae():
-    if is_amd():
-        return False # enabling pytorch attention on AMD currently causes crash when doing high res
-    return pytorch_attention_enabled()
-
 def pytorch_attention_flash_attention():
     global ENABLE_PYTORCH_ATTENTION
     if ENABLE_PYTORCH_ATTENTION:
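For context, here is a minimal, self-contained sketch of the VAE attention dispatch after this change. The module-level flags and stub attention callables are stand-ins for ComfyUI's `model_management` module and attention implementations, not the real API, and the `else` branch body is not shown in the diff, so its return value is an assumption.

```python
import logging

# Hypothetical stand-ins for ComfyUI's model_management flags and the
# attention implementations; only the dispatch shape mirrors the diff.
ENABLE_XFORMERS_VAE = False
ENABLE_PYTORCH_ATTENTION = True

def xformers_attention(q, k, v):
    raise NotImplementedError  # stub

def pytorch_attention(q, k, v):
    raise NotImplementedError  # stub

def normal_attention(q, k, v):
    raise NotImplementedError  # stub

def vae_attention():
    # Post-change behavior: the VAE keys off the same flag as the rest of
    # the model. The AMD-specific pytorch_attention_enabled_vae() guard,
    # which returned False on AMD and forced the fallback path, is gone.
    if ENABLE_XFORMERS_VAE:
        logging.info("Using xformers attention in VAE")
        return xformers_attention
    elif ENABLE_PYTORCH_ATTENTION:
        logging.info("Using pytorch attention in VAE")
        return pytorch_attention
    else:
        # Fallback branch; its body is not shown in the diff, so this
        # return is an assumption for illustration.
        return normal_attention
```

The practical effect is that AMD users now get PyTorch attention in the VAE whenever the general PyTorch-attention flag is set, instead of being unconditionally routed to the fallback path by the removed `is_amd()` check.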