From b8fa6827ee26ea5cbd06014991446fe6c052e6fc Mon Sep 17 00:00:00 2001
From: ahmadafzal007
Date: Sat, 7 Feb 2026 00:20:58 +0500
Subject: [PATCH] cuda_malloc: add missing GPUs to blacklist and fix bare except clauses (#940)

Add GPU models reported in issue #940 that are missing from the
cudaMallocAsync blacklist:
- Tesla P40 (Pascal datacenter GPU)
- Tesla P4 (Pascal datacenter GPU)
- NVIDIA L4 (reported by users in cloud environments)
- NVIDIA A10G (AWS GPU, confirmed fix by user)
- GRID A800D (vGPU mode, reported by user)

Also replace bare `except:` with `except Exception:` in three locations
to follow Python best practices and avoid accidentally catching
KeyboardInterrupt and SystemExit.
---
 cuda_malloc.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/cuda_malloc.py b/cuda_malloc.py
index b2182df37..9bc52125b 100644
--- a/cuda_malloc.py
+++ b/cuda_malloc.py
@@ -49,13 +49,14 @@ blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeFor
              "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
              "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
              "GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
-             "GeForce GTX 1650", "GeForce GTX 1630", "Tesla M4", "Tesla M6", "Tesla M10", "Tesla M40", "Tesla M60"
+             "GeForce GTX 1650", "GeForce GTX 1630", "Tesla M4", "Tesla M6", "Tesla M10", "Tesla M40", "Tesla M60",
+             "Tesla P40", "Tesla P4", "NVIDIA L4", "NVIDIA A10G", "GRID A800D",
              }
 
 def cuda_malloc_supported():
     try:
         names = get_gpu_names()
-    except:
+    except Exception:
         names = set()
     for x in names:
         if "NVIDIA" in x:
@@ -76,7 +77,7 @@ try:
     module = importlib.util.module_from_spec(spec)
     spec.loader.exec_module(module)
     version = module.__version__
-except:
+except Exception:
     pass
 
 if not args.cuda_malloc:
@@ -84,7 +85,7 @@ if not args.cuda_malloc:
         if int(version[0]) >= 2 and "+cu" in version: # enable by default for torch version 2.0 and up only on cuda torch
             if PerformanceFeature.AutoTune not in args.fast: # Autotune has issues with cuda malloc
                 args.cuda_malloc = cuda_malloc_supported()
-    except:
+    except Exception:
         pass
 
 if enables_dynamic_vram() and comfy_aimdo.control.init():