mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-03-30 05:23:37 +08:00
cuda_malloc: add missing GPUs to blacklist and fix bare except clauses (#940)

Add GPU models reported in issue #940 that are missing from the
cudaMallocAsync blacklist:
- Tesla P40 (Pascal datacenter GPU)
- Tesla P4 (Pascal datacenter GPU)
- NVIDIA L4 (reported by users in cloud environments)
- NVIDIA A10G (AWS GPU, confirmed fix by user)
- GRID A800D (vGPU mode, reported by user)

Also replace bare except: with except Exception: in three locations
to follow Python best practices and avoid accidentally catching
KeyboardInterrupt and SystemExit.
This commit is contained in:
parent
a1c101f861
commit
b8fa6827ee
@ -49,13 +49,14 @@ blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeFor
|
|||||||
"Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
|
"Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
|
||||||
"Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
|
"Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000",
|
||||||
"GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
|
"GeForce MX110", "GeForce MX130", "GeForce 830M", "GeForce 840M", "GeForce GTX 850M", "GeForce GTX 860M",
|
||||||
"GeForce GTX 1650", "GeForce GTX 1630", "Tesla M4", "Tesla M6", "Tesla M10", "Tesla M40", "Tesla M60"
|
"GeForce GTX 1650", "GeForce GTX 1630", "Tesla M4", "Tesla M6", "Tesla M10", "Tesla M40", "Tesla M60",
|
||||||
|
"Tesla P40", "Tesla P4", "NVIDIA L4", "NVIDIA A10G", "GRID A800D",
|
||||||
}
|
}
|
||||||
|
|
||||||
def cuda_malloc_supported():
|
def cuda_malloc_supported():
|
||||||
try:
|
try:
|
||||||
names = get_gpu_names()
|
names = get_gpu_names()
|
||||||
except:
|
except Exception:
|
||||||
names = set()
|
names = set()
|
||||||
for x in names:
|
for x in names:
|
||||||
if "NVIDIA" in x:
|
if "NVIDIA" in x:
|
||||||
@ -76,7 +77,7 @@ try:
|
|||||||
module = importlib.util.module_from_spec(spec)
|
module = importlib.util.module_from_spec(spec)
|
||||||
spec.loader.exec_module(module)
|
spec.loader.exec_module(module)
|
||||||
version = module.__version__
|
version = module.__version__
|
||||||
except:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if not args.cuda_malloc:
|
if not args.cuda_malloc:
|
||||||
@ -84,7 +85,7 @@ if not args.cuda_malloc:
|
|||||||
if int(version[0]) >= 2 and "+cu" in version: # enable by default for torch version 2.0 and up only on cuda torch
|
if int(version[0]) >= 2 and "+cu" in version: # enable by default for torch version 2.0 and up only on cuda torch
|
||||||
if PerformanceFeature.AutoTune not in args.fast: # Autotune has issues with cuda malloc
|
if PerformanceFeature.AutoTune not in args.fast: # Autotune has issues with cuda malloc
|
||||||
args.cuda_malloc = cuda_malloc_supported()
|
args.cuda_malloc = cuda_malloc_supported()
|
||||||
except:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
if enables_dynamic_vram() and comfy_aimdo.control.init():
|
if enables_dynamic_vram() and comfy_aimdo.control.init():
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user