From a4eb32a4ffbfd9171c3dd4e8c11a6a7bfbae5668 Mon Sep 17 00:00:00 2001 From: Christopher Anderson Date: Wed, 29 Oct 2025 23:48:26 +1100 Subject: [PATCH] inserted missing is_amd() check --- comfy/model_management.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index d4dc813f8..f1d7d11b0 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -334,13 +334,14 @@ SUPPORT_FP8_OPS = args.supports_fp8_compute AMD_RDNA2_AND_OLDER_ARCH = ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012", "gfx906", "gfx900", "gfx803"] try: - arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName - if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)): - torch.backends.cudnn.enabled = os.environ.get("TORCH_AMD_CUDNN_ENABLED", "0").strip().lower() not in { - "0", "off", "false", "disable", "disabled", "no"} - if not torch.backends.cudnn.enabled: - logging.info( - "ComfyUI has set torch.backends.cudnn.enabled to False for better AMD performance. Set environment var TORCH_AMD_CUDDNN_ENABLED=1 to enable it again.") + if is_amd(): + arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName + if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)): + torch.backends.cudnn.enabled = os.environ.get("TORCH_AMD_CUDNN_ENABLED", "0").strip().lower() not in { + "0", "off", "false", "disable", "disabled", "no"} + if not torch.backends.cudnn.enabled: + logging.info( + "ComfyUI has set torch.backends.cudnn.enabled to False for better AMD performance. Set environment var TORCH_AMD_CUDNN_ENABLED=1 to enable it again.") try: rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2]))