Merge remote-tracking branch 'origin/worksplit-multigpu' into worksplit-multigpu-wip
Some checks are pending
Python Linting / Run Ruff (push) Waiting to run
Python Linting / Run Pylint (push) Waiting to run
Build package / Build Test (3.13) (push) Waiting to run
Build package / Build Test (3.10) (push) Waiting to run
Build package / Build Test (3.11) (push) Waiting to run
Build package / Build Test (3.12) (push) Waiting to run
Build package / Build Test (3.14) (push) Waiting to run

This commit is contained in:
Jedrzej Kosinski 2026-04-20 00:30:25 -07:00
commit 3e7c01fb5b
3 changed files with 5 additions and 4 deletions

View File

@@ -319,7 +319,8 @@ class ModelPatcher:
         #than pays for CFG. So return everything both torch and Aimdo could give us
         aimdo_mem = 0
         if comfy.memory_management.aimdo_enabled:
-            aimdo_mem = comfy_aimdo.model_vbar.vbars_analyze()
+            aimdo_device = device.index if getattr(device, "type", None) == "cuda" else None
+            aimdo_mem = comfy_aimdo.model_vbar.vbars_analyze(aimdo_device)
         return comfy.model_management.get_free_memory(device) + aimdo_mem

     def get_clone_model_override(self):

View File

@@ -192,7 +192,7 @@ import gc
 if 'torch' in sys.modules:
     logging.warning("WARNING: Potential Error in code: Torch already imported, torch should never be imported before this point.")
+import torch
 import comfy.utils
 import execution
@@ -210,7 +210,7 @@ import comfy.model_patcher
     if args.enable_dynamic_vram or (enables_dynamic_vram() and comfy.model_management.is_nvidia() and not comfy.model_management.is_wsl()):
         if (not args.enable_dynamic_vram) and (comfy.model_management.torch_version_numeric < (2, 8)):
             logging.warning("Unsupported Pytorch detected. DynamicVRAM support requires Pytorch version 2.8 or later. Falling back to legacy ModelPatcher. VRAM estimates may be unreliable especially on Windows")
-        elif comfy_aimdo.control.init_device(comfy.model_management.get_torch_device().index):
+        elif comfy_aimdo.control.init_devices(range(torch.cuda.device_count())):
             if args.verbose == 'DEBUG':
                 comfy_aimdo.control.set_log_debug()
             elif args.verbose == 'CRITICAL':

View File

@@ -23,7 +23,7 @@ SQLAlchemy
 filelock
 av>=14.2.0
 comfy-kitchen>=0.2.8
-comfy-aimdo>=0.2.12
+comfy-aimdo==0.0.213
 requests
 simpleeval>=1.0.0
 blake3