From 00184d155ba67ba6e9579bd7ef2ede93217a3520 Mon Sep 17 00:00:00 2001
From: Christopher Anderson
Date: Fri, 24 Oct 2025 18:40:17 +1100
Subject: [PATCH 1/3] Add environment variable to opt out of #10302 (forced disablement of cudnn for all AMD users)

---
 comfy/model_management.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 79d6ff9d4..ff84af4c5 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -334,11 +334,13 @@ SUPPORT_FP8_OPS = args.supports_fp8_compute
 AMD_RDNA2_AND_OLDER_ARCH = ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012", "gfx906", "gfx900", "gfx803"]
 
 try:
-    if is_amd():
-        arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
-        if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)):
-            torch.backends.cudnn.enabled = False # Seems to improve things a lot on AMD
-            logging.info("Set: torch.backends.cudnn.enabled = False for better AMD performance.")
+    arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
+    if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)):
+        torch.backends.cudnn.enabled = os.environ.get("TORCH_BACKENDS_CUDNN_ENABLED", "0").strip().lower() not in {
+            "0", "off", "false", "disable", "disabled", "no"}
+        if not torch.backends.cudnn.enabled:
+            logging.info(
+                "ComfyUI has set torch.backends.cudnn.enabled to False for better AMD performance. Set environment var TORCH_BACKENDS_CUDNN_ENABLED=1 to enable it again.")
 
     try:
         rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2]))

From 995c07341ebf7a7f4103646e41b656a71d3c113e Mon Sep 17 00:00:00 2001
From: Christopher Anderson
Date: Fri, 24 Oct 2025 18:47:05 +1100
Subject: [PATCH 2/3] Replace TORCH_BACKENDS_CUDNN_ENABLED with TORCH_AMD_CUDNN_ENABLED (more accurate)

---
 comfy/model_management.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index ff84af4c5..d4dc813f8 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -336,11 +336,11 @@ AMD_RDNA2_AND_OLDER_ARCH = ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012
 try:
     arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
     if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)):
-        torch.backends.cudnn.enabled = os.environ.get("TORCH_BACKENDS_CUDNN_ENABLED", "0").strip().lower() not in {
+        torch.backends.cudnn.enabled = os.environ.get("TORCH_AMD_CUDNN_ENABLED", "0").strip().lower() not in {
             "0", "off", "false", "disable", "disabled", "no"}
         if not torch.backends.cudnn.enabled:
             logging.info(
-                "ComfyUI has set torch.backends.cudnn.enabled to False for better AMD performance. Set environment var TORCH_BACKENDS_CUDNN_ENABLED=1 to enable it again.")
+                "ComfyUI has set torch.backends.cudnn.enabled to False for better AMD performance. Set environment var TORCH_AMD_CUDNN_ENABLED=1 to enable it again.")
 
     try:
         rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2]))

From a4eb32a4ffbfd9171c3dd4e8c11a6a7bfbae5668 Mon Sep 17 00:00:00 2001
From: Christopher Anderson
Date: Wed, 29 Oct 2025 23:48:26 +1100
Subject: [PATCH 3/3] inserted missing is_amd() check

---
 comfy/model_management.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index d4dc813f8..f1d7d11b0 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -334,13 +334,14 @@ SUPPORT_FP8_OPS = args.supports_fp8_compute
 AMD_RDNA2_AND_OLDER_ARCH = ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012", "gfx906", "gfx900", "gfx803"]
 
 try:
-    arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
-    if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)):
-        torch.backends.cudnn.enabled = os.environ.get("TORCH_AMD_CUDNN_ENABLED", "0").strip().lower() not in {
-            "0", "off", "false", "disable", "disabled", "no"}
-        if not torch.backends.cudnn.enabled:
-            logging.info(
-                "ComfyUI has set torch.backends.cudnn.enabled to False for better AMD performance. Set environment var TORCH_AMD_CUDNN_ENABLED=1 to enable it again.")
+    if is_amd():
+        arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
+        if not (any((a in arch) for a in AMD_RDNA2_AND_OLDER_ARCH)):
+            torch.backends.cudnn.enabled = os.environ.get("TORCH_AMD_CUDNN_ENABLED", "0").strip().lower() not in {
+                "0", "off", "false", "disable", "disabled", "no"}
+            if not torch.backends.cudnn.enabled:
+                logging.info(
+                    "ComfyUI has set torch.backends.cudnn.enabled to False for better AMD performance. Set environment var TORCH_AMD_CUDNN_ENABLED=1 to enable it again.")
 
     try:
         rocm_version = tuple(map(int, str(torch.version.hip).split(".")[:2]))