mm: Don't GPU-load models

Aimdo will do this on demand, zero-copy. Remove the special case for
VRAM > RAM.
This commit is contained in:
Rattus 2026-01-27 14:10:54 +10:00
parent db99ab48c2
commit 26dc3a20c6

View File

@ -823,7 +823,7 @@ def unet_inital_load_device(parameters, dtype):
mem_dev = get_free_memory(torch_dev)
mem_cpu = get_free_memory(cpu_dev)
if mem_dev > mem_cpu and model_size < mem_dev:
if mem_dev > mem_cpu and model_size < mem_dev and comfy.memory_management.aimdo_allocator is None:
return torch_dev
else:
return cpu_dev