diff --git a/comfy/model_management.py b/comfy/model_management.py
index 052dfb775..c19311eab 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -203,6 +203,9 @@ def get_autocast_device(dev):
 
 def xformers_enabled():
     if vram_state == CPU:
         return False
+    # ROCm reports full device names like "AMD Radeon ...", so match by substring
+    if "AMD" in torch.cuda.get_device_properties("cuda").name:
+        return False
     return XFORMERS_IS_AVAILBLE
 
@@ -268,6 +270,9 @@ def should_use_fp16():
     props = torch.cuda.get_device_properties("cuda")
     if props.major < 7:
         return False
+
+    if "AMD" in props.name:
+        return False
 
     #FP32 is faster on those cards?
     nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600"]