Minor updates for worksplit_gpu with comfy-aimdo (#13419)
Some checks failed
Python Linting / Run Ruff (push) Has been cancelled
Python Linting / Run Pylint (push) Has been cancelled
Build package / Build Test (3.10) (push) Has been cancelled
Build package / Build Test (3.11) (push) Has been cancelled
Build package / Build Test (3.12) (push) Has been cancelled
Build package / Build Test (3.13) (push) Has been cancelled
Build package / Build Test (3.14) (push) Has been cancelled

* main: init all visible cuda devices in aimdo

* mp: call vbars_analyze for the GPU in question

* requirements: bump aimdo to pre-release version
This commit is contained in:
rattus 2026-04-16 15:49:01 +10:00 committed by GitHub
parent 48deb15c0e
commit f0d550bd02
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 5 additions and 4 deletions

View File

@@ -319,7 +319,8 @@ class ModelPatcher:
         #than pays for CFG. So return everything both torch and Aimdo could give us
         aimdo_mem = 0
         if comfy.memory_management.aimdo_enabled:
-            aimdo_mem = comfy_aimdo.model_vbar.vbars_analyze()
+            aimdo_device = device.index if getattr(device, "type", None) == "cuda" else None
+            aimdo_mem = comfy_aimdo.model_vbar.vbars_analyze(aimdo_device)
         return comfy.model_management.get_free_memory(device) + aimdo_mem

     def get_clone_model_override(self):

View File

@@ -192,7 +192,7 @@ import gc
 if 'torch' in sys.modules:
     logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.")
+import torch
 import comfy.utils
 import execution
@@ -210,7 +210,7 @@ import comfy.model_patcher
 if args.enable_dynamic_vram or (enables_dynamic_vram() and comfy.model_management.is_nvidia() and not comfy.model_management.is_wsl()):
     if (not args.enable_dynamic_vram) and (comfy.model_management.torch_version_numeric < (2, 8)):
         logging.warning("Unsupported Pytorch detected. DynamicVRAM support requires Pytorch version 2.8 or later. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
-    elif comfy_aimdo.control.init_device(comfy.model_management.get_torch_device().index):
+    elif comfy_aimdo.control.init_devices(range(torch.cuda.device_count())):
         if args.verbose == 'DEBUG':
             comfy_aimdo.control.set_log_debug()
         elif args.verbose == 'CRITICAL':

View File

@@ -23,7 +23,7 @@ SQLAlchemy
 filelock
 av>=14.2.0
 comfy-kitchen>=0.2.8
-comfy-aimdo>=0.2.12
+comfy-aimdo==0.0.213
 requests
 simpleeval>=1.0.0
 blake3