diff --git a/comfy/model_management.py b/comfy/model_management.py
index bcf1399c4..4b78e27cb 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1446,7 +1446,7 @@ def pytorch_attention_enabled():
     return ENABLE_PYTORCH_ATTENTION
 
 def pytorch_attention_enabled_vae():
-    if is_amd():
+    if is_amd() and not SUPPORT_FP8_OPS: # exclude RDNA4 (gfx1200, gfx1201) and CDNA4 (gfx950) that support fp8
         return False # enabling pytorch attention on AMD currently causes crash when doing high res
     return pytorch_attention_enabled()
 
diff --git a/comfy/sd.py b/comfy/sd.py
index e573804a5..96c41ee8d 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -440,7 +440,7 @@ class VAE:
         if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
             sd = diffusers_convert.convert_vae_state_dict(sd)
 
-        if model_management.is_amd():
+        if model_management.is_amd() and not model_management.SUPPORT_FP8_OPS: # exclude RDNA4 (gfx1200, gfx1201) and CDNA4 (gfx950) that support fp8
             VAE_KL_MEM_RATIO = 2.73
         else:
             VAE_KL_MEM_RATIO = 1.0