main: Rework aimdo into process

Be more tolerant of unsupported platforms and fallback properly.
Fixes crash when cuda is not installed at all.
This commit is contained in:
Rattus 2026-01-15 17:40:32 +10:00
parent 2e68a7638c
commit 1269a55b6d
3 changed files with 25 additions and 32 deletions

View File

@@ -1,7 +1,5 @@
 from comfy.quant_ops import QuantizedTensor
-import comfy_aimdo.torch
 def vram_aligned_size(tensor):
     if isinstance(tensor, list):
         return sum([vram_aligned_size(t) for t in tensor])
@@ -51,4 +49,4 @@ def interpret_gathered_like(tensors, gathered):
     return dest_views
-aimdo_allocator = comfy_aimdo.torch.CUDAPluggableAllocator()
+aimdo_allocator = None

View File

@@ -87,7 +87,7 @@ if not args.cuda_malloc:
         except:
             pass
-    if enables_dynamic_vram() and comfy_aimdo.control.lib is not None:
+    if enables_dynamic_vram() and comfy_aimdo.control.init(0):
         args.cuda_malloc = False
         os.environ['PYTORCH_CUDA_ALLOC_CONF'] = ""

51
main.py
View File

@@ -174,29 +174,6 @@ if 'torch' in sys.modules:
     logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.")
-has_aimdo = False
-import comfy_aimdo.control
-if comfy_aimdo.control.lib is not None:
-    if args.verbose == 'DEBUG':
-        comfy_aimdo.control.set_log_debug()
-    elif args.verbose == 'CRITICAL':
-        comfy_aimdo.control.set_log_critical()
-    elif args.verbose == 'ERROR':
-        comfy_aimdo.control.set_log_error()
-    elif args.verbose == 'WARNING':
-        comfy_aimdo.control.set_log_warning()
-    else: #INFO
-        comfy_aimdo.control.set_log_info()
-    if enables_dynamic_vram():
-        logging.info("DynamicVRAM support detected and enabled")
-        has_aimdo = True
-else:
-    if enables_dynamic_vram():
-        logging.info("No native comfy-aimdo install detected. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
 import comfy.utils
 import execution
@@ -211,11 +188,29 @@ import hook_breaker_ac10a0
 import comfy.memory_management
 import comfy.model_patcher
-if has_aimdo:
-    comfy.model_patcher.CoreModelPatcher = comfy.model_patcher.ModelPatcherDynamic
-    comfy_aimdo.control.init_vram_guard(comfy.model_management.get_torch_device().index)
-else:
-    comfy.memory_management.aimdo_allocator = None
+import comfy_aimdo.control
+import comfy_aimdo.torch
+if enables_dynamic_vram():
+    if comfy_aimdo.control.init_device(comfy.model_management.get_torch_device().index):
+        if args.verbose == 'DEBUG':
+            comfy_aimdo.control.set_log_debug()
+        elif args.verbose == 'CRITICAL':
+            comfy_aimdo.control.set_log_critical()
+        elif args.verbose == 'ERROR':
+            comfy_aimdo.control.set_log_error()
+        elif args.verbose == 'WARNING':
+            comfy_aimdo.control.set_log_warning()
+        else: #INFO
+            comfy_aimdo.control.set_log_info()
+        comfy.model_patcher.CoreModelPatcher = comfy.model_patcher.ModelPatcherDynamic
+        comfy.memory_management.aimdo_allocator = comfy_aimdo.torch.get_torch_allocator()
+        logging.info("DynamicVRAM support detected and enabled")
+    else:
+        logging.info("No working comfy-aimdo install detected. DynamicVRAM support disabled. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
+        comfy.memory_management.aimdo_allocator = None
 def cuda_malloc_warning():
     device = comfy.model_management.get_torch_device()