From 26dc3a20c68634955b41d55107f6b4d65ad70735 Mon Sep 17 00:00:00 2001
From: Rattus
Date: Tue, 27 Jan 2026 14:10:54 +1000
Subject: [PATCH] mm: Don't GPU-load models

Aimdo will do this on demand as zero-copy. Remove the special case for
VRAM > RAM.
---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 70c2d5e22..412752503 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -823,7 +823,7 @@ def unet_inital_load_device(parameters, dtype):
     mem_dev = get_free_memory(torch_dev)
     mem_cpu = get_free_memory(cpu_dev)
 
-    if mem_dev > mem_cpu and model_size < mem_dev:
+    if mem_dev > mem_cpu and model_size < mem_dev and comfy.memory_management.aimdo_allocator is None:
         return torch_dev
     else:
         return cpu_dev