Mirror of https://github.com/comfyanonymous/ComfyUI.git
mm: Don't GPU-load models
Aimdo will do this on demand as zero-copy. Remove the special case for VRAM > RAM.
parent db99ab48c2
commit 26dc3a20c6
@@ -823,7 +823,7 @@ def unet_inital_load_device(parameters, dtype):
     mem_dev = get_free_memory(torch_dev)
     mem_cpu = get_free_memory(cpu_dev)
-    if mem_dev > mem_cpu and model_size < mem_dev:
+    if mem_dev > mem_cpu and model_size < mem_dev and comfy.memory_management.aimdo_allocator is None:
         return torch_dev
     else:
         return cpu_dev
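For context, a minimal sketch of the decision this hunk changes. Everything outside the diff lines above (the helper name pick_initial_load_device, its parameters, and the default devices) is illustrative, not taken from the repository: with an aimdo allocator configured, the initial load now stays on the CPU even when the GPU reports more free memory than the host, on the assumption that the allocator will map the weights onto the GPU on demand without an extra copy.

import torch

def pick_initial_load_device(model_size, mem_dev, mem_cpu, aimdo_allocator,
                             torch_dev=torch.device("cuda"),
                             cpu_dev=torch.device("cpu")):
    # Old behaviour: prefer the GPU whenever it has more free memory than
    # the host and the model fits into that free memory.
    # New behaviour: only do so when no aimdo allocator is present;
    # otherwise keep the weights on the CPU and defer GPU placement
    # to the allocator.
    if mem_dev > mem_cpu and model_size < mem_dev and aimdo_allocator is None:
        return torch_dev
    return cpu_dev

For example, pick_initial_load_device(6e9, mem_dev=20e9, mem_cpu=8e9, aimdo_allocator=object()) returns the CPU device, where the pre-change check would have picked the GPU.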