From 1269a55b6d26d25a2e5a593967a26eb09a8f7f46 Mon Sep 17 00:00:00 2001
From: Rattus
Date: Thu, 15 Jan 2026 17:40:32 +1000
Subject: [PATCH] main: Rework aimdo into process

Be more tolerant of unsupported platforms and fallback properly.

Fixes crash when cuda is not installed at all.
---
 comfy/memory_management.py |  4 +--
 cuda_malloc.py             |  2 +-
 main.py                    | 51 +++++++++++++++++---------------------
 3 files changed, 25 insertions(+), 32 deletions(-)

diff --git a/comfy/memory_management.py b/comfy/memory_management.py
index 4169e853c..3765de0a1 100644
--- a/comfy/memory_management.py
+++ b/comfy/memory_management.py
@@ -1,7 +1,5 @@
 from comfy.quant_ops import QuantizedTensor
 
-import comfy_aimdo.torch
-
 def vram_aligned_size(tensor):
     if isinstance(tensor, list):
         return sum([vram_aligned_size(t) for t in tensor])
@@ -51,4 +49,4 @@ def interpret_gathered_like(tensors, gathered):
 
     return dest_views
 
-aimdo_allocator = comfy_aimdo.torch.CUDAPluggableAllocator()
+aimdo_allocator = None
diff --git a/cuda_malloc.py b/cuda_malloc.py
index 3c7c8593e..d08162cbc 100644
--- a/cuda_malloc.py
+++ b/cuda_malloc.py
@@ -87,7 +87,7 @@ if not args.cuda_malloc:
     except:
         pass
 
-if enables_dynamic_vram() and comfy_aimdo.control.lib is not None:
+if enables_dynamic_vram() and comfy_aimdo.control.init(0):
     args.cuda_malloc = False
     os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ""
 
diff --git a/main.py b/main.py
index 52f11bfff..b8c951375 100644
--- a/main.py
+++ b/main.py
@@ -174,29 +174,6 @@ if 'torch' in sys.modules:
     logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.")
 
-has_aimdo = False
-
-import comfy_aimdo.control
-
-if comfy_aimdo.control.lib is not None:
-    if args.verbose == 'DEBUG':
-        comfy_aimdo.control.set_log_debug()
-    elif args.verbose == 'CRITICAL':
-        comfy_aimdo.control.set_log_critical()
-    elif args.verbose == 'ERROR':
-        comfy_aimdo.control.set_log_error()
-    elif args.verbose == 'WARNING':
-        comfy_aimdo.control.set_log_warning()
-    else: #INFO
-        comfy_aimdo.control.set_log_info()
-
-    if enables_dynamic_vram():
-        logging.info("DynamicVRAM support detected and enabled")
-        has_aimdo = True
-else:
-    if enables_dynamic_vram():
-        logging.info("No native comfy-aimdo install detected. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
-
 import comfy.utils
 
 import execution
 
@@ -211,11 +188,29 @@ import hook_breaker_ac10a0
 
 import comfy.memory_management
 import comfy.model_patcher
-if has_aimdo:
-    comfy.model_patcher.CoreModelPatcher = comfy.model_patcher.ModelPatcherDynamic
-    comfy_aimdo.control.init_vram_guard(comfy.model_management.get_torch_device().index)
-else:
-    comfy.memory_management.aimdo_allocator = None
+import comfy_aimdo.control
+import comfy_aimdo.torch
+
+if enables_dynamic_vram():
+    if comfy_aimdo.control.init_device(comfy.model_management.get_torch_device().index):
+        if args.verbose == 'DEBUG':
+            comfy_aimdo.control.set_log_debug()
+        elif args.verbose == 'CRITICAL':
+            comfy_aimdo.control.set_log_critical()
+        elif args.verbose == 'ERROR':
+            comfy_aimdo.control.set_log_error()
+        elif args.verbose == 'WARNING':
+            comfy_aimdo.control.set_log_warning()
+        else: #INFO
+            comfy_aimdo.control.set_log_info()
+
+        comfy.model_patcher.CoreModelPatcher = comfy.model_patcher.ModelPatcherDynamic
+        comfy.memory_management.aimdo_allocator = comfy_aimdo.torch.get_torch_allocator()
+        logging.info("DynamicVRAM support detected and enabled")
+    else:
+        logging.info("No working comfy-aimdo install detected. DynamicVRAM support disabled. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
+        comfy.memory_management.aimdo_allocator = None
+
 def cuda_malloc_warning():
     device = comfy.model_management.get_torch_device()