From 9c71a667904a049975531f2a7dd55f4a8fc92652 Mon Sep 17 00:00:00 2001
From: ComfyUI Wiki
Date: Wed, 5 Nov 2025 02:51:53 +0800
Subject: [PATCH 1/3] chore: update workflow templates to v0.2.11 (#10634)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 856e373de..249c36dee 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.28.8
-comfyui-workflow-templates==0.2.4
+comfyui-workflow-templates==0.2.11
 comfyui-embedded-docs==0.3.1
 torch
 torchsde

From a389ee01bb7ba5174729906a7f85bd08b5c2cb87 Mon Sep 17 00:00:00 2001
From: rattus <46076784+rattus128@users.noreply.github.com>
Date: Wed, 5 Nov 2025 08:14:10 +1000
Subject: [PATCH 2/3] caching: Handle None outputs tuple case (#10637)

---
 comfy_execution/caching.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy_execution/caching.py b/comfy_execution/caching.py
index e077f78b0..326a279fc 100644
--- a/comfy_execution/caching.py
+++ b/comfy_execution/caching.py
@@ -399,6 +399,8 @@ class RAMPressureCache(LRUCache):
         ram_usage = RAM_CACHE_DEFAULT_RAM_USAGE
         def scan_list_for_ram_usage(outputs):
             nonlocal ram_usage
+            if outputs is None:
+                return
             for output in outputs:
                 if isinstance(output, list):
                     scan_list_for_ram_usage(output)

From 7f3e4d486cd77c3ad30eb4714ec18bdaf29e2b5c Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Tue, 4 Nov 2025 14:37:50 -0800
Subject: [PATCH 3/3] Limit amount of pinned memory on windows to prevent issues. (#10638)

---
 comfy/model_management.py | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 79c0dfdb4..0d040e55e 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1082,8 +1082,20 @@ def cast_to_device(tensor, device, dtype, copy=False):
     non_blocking = device_supports_non_blocking(device)
     return cast_to(tensor, dtype=dtype, device=device, non_blocking=non_blocking, copy=copy)
 
+
+PINNED_MEMORY = {}
+TOTAL_PINNED_MEMORY = 0
+if PerformanceFeature.PinnedMem in args.fast:
+    if WINDOWS:
+        MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.45 # Windows limit is apparently 50%
+    else:
+        MAX_PINNED_MEMORY = get_total_memory(torch.device("cpu")) * 0.95
+else:
+    MAX_PINNED_MEMORY = -1
+
 def pin_memory(tensor):
-    if PerformanceFeature.PinnedMem not in args.fast:
+    global TOTAL_PINNED_MEMORY
+    if MAX_PINNED_MEMORY <= 0:
         return False
 
     if not is_nvidia():
@@ -1092,13 +1104,21 @@ def pin_memory(tensor):
     if not is_device_cpu(tensor.device):
         return False
 
-    if torch.cuda.cudart().cudaHostRegister(tensor.data_ptr(), tensor.numel() * tensor.element_size(), 1) == 0:
+    size = tensor.numel() * tensor.element_size()
+    if (TOTAL_PINNED_MEMORY + size) > MAX_PINNED_MEMORY:
+        return False
+
+    ptr = tensor.data_ptr()
+    if torch.cuda.cudart().cudaHostRegister(ptr, size, 1) == 0:
+        PINNED_MEMORY[ptr] = size
+        TOTAL_PINNED_MEMORY += size
         return True
 
     return False
 
 def unpin_memory(tensor):
-    if PerformanceFeature.PinnedMem not in args.fast:
+    global TOTAL_PINNED_MEMORY
+    if MAX_PINNED_MEMORY <= 0:
         return False
 
     if not is_nvidia():
@@ -1107,7 +1127,11 @@ def unpin_memory(tensor):
     if not is_device_cpu(tensor.device):
         return False
 
-    if torch.cuda.cudart().cudaHostUnregister(tensor.data_ptr()) == 0:
+    ptr = tensor.data_ptr()
+    if torch.cuda.cudart().cudaHostUnregister(ptr) == 0:
+        TOTAL_PINNED_MEMORY -= PINNED_MEMORY.pop(ptr)
+        if len(PINNED_MEMORY) == 0:
+            TOTAL_PINNED_MEMORY = 0
         return True
 
     return False
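Note on patch 2/3: the fix simply makes a recursive scan tolerate a None outputs tuple before iterating it. The following is a minimal standalone sketch of that pattern, not the actual cache code; the sys.getsizeof-based size estimate and the function name estimate_ram_usage are illustrative assumptions.

    # Sketch of a recursive output scan that tolerates None, mirroring the
    # guard added in patch 2/3. The per-object size estimate is a placeholder.
    import sys

    def estimate_ram_usage(outputs):
        """Walk nested output lists and sum a rough per-object size."""
        ram_usage = 0

        def scan_list_for_ram_usage(outputs):
            nonlocal ram_usage
            if outputs is None:          # the case the patch handles: no outputs recorded yet
                return
            for output in outputs:
                if isinstance(output, list):
                    scan_list_for_ram_usage(output)
                else:
                    ram_usage += sys.getsizeof(output)

        scan_list_for_ram_usage(outputs)
        return ram_usage

    print(estimate_ram_usage(None))            # 0 instead of a TypeError
    print(estimate_ram_usage([[1, 2], [3]]))   # small positive number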
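Note on patch 3/3: the change caps how much host memory may be page-locked (45% of system RAM on Windows, 95% elsewhere) and keeps a pointer-to-size map so unpinning releases the right amount from the running total. The accounting pattern can be illustrated with the minimal standalone sketch below; it is not the ComfyUI code itself, and the PinnedMemoryTracker class, the toy register/unregister callables, and the byte counts are assumptions standing in for torch.cuda.cudart().cudaHostRegister / cudaHostUnregister.

    # Minimal sketch of budgeted pinned-memory accounting, mirroring patch 3/3.
    class PinnedMemoryTracker:
        def __init__(self, max_bytes):
            self.max_bytes = max_bytes      # budget, e.g. 45% of system RAM on Windows
            self.registrations = {}         # data_ptr -> pinned size in bytes
            self.total = 0                  # currently pinned bytes

        def pin(self, ptr, size, register):
            """Pin `size` bytes at `ptr` if it fits the budget; return success."""
            if self.max_bytes <= 0:
                return False
            if self.total + size > self.max_bytes:
                return False
            if register(ptr, size) == 0:    # 0 means success, like cudaHostRegister
                self.registrations[ptr] = size
                self.total += size
                return True
            return False

        def unpin(self, ptr, unregister):
            """Unpin a previously pinned pointer and release its budget share."""
            if ptr not in self.registrations:
                return False
            if unregister(ptr) == 0:
                self.total -= self.registrations.pop(ptr)
                if not self.registrations:
                    self.total = 0          # clamp any accounting drift back to zero
                return True
            return False

    if __name__ == "__main__":
        # Toy backends that always report success, used only for the illustration.
        tracker = PinnedMemoryTracker(max_bytes=1024)
        ok = tracker.pin(ptr=0x1000, size=512, register=lambda p, s: 0)
        too_big = tracker.pin(ptr=0x2000, size=1024, register=lambda p, s: 0)
        print(ok, too_big, tracker.total)   # True False 512
        tracker.unpin(ptr=0x1000, unregister=lambda p: 0)
        print(tracker.total)                # 0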