Commit 948b81fe42 by snwchd71, 2026-04-12 09:24:59 +02:00 (committed via GitHub)
2 changed files with 13 additions and 2 deletions

Changed file 1 of 2 (the function and enum names suggest ComfyUI's comfy/model_management.py):

@@ -460,7 +460,10 @@
 if cpu_state != CPUState.GPU:
     vram_state = VRAMState.DISABLED
 
 if cpu_state == CPUState.MPS:
-    vram_state = VRAMState.SHARED
+    if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
+        vram_state = set_vram_to
+    else:
+        vram_state = VRAMState.SHARED
 
 logging.info(f"Set vram state to: {vram_state.name}")
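
Taken on its own, the new branch means an explicit low-VRAM or no-VRAM request is no longer ignored on MPS, where the state used to be hard-coded to SHARED. A minimal sketch of just that selection logic, with the enum stubbed out (LOW_VRAM, NO_VRAM, and SHARED appear in the hunk; the other members and the helper name are illustrative):

from enum import Enum, auto

class VRAMState(Enum):
    DISABLED = auto()
    NO_VRAM = auto()
    LOW_VRAM = auto()
    NORMAL_VRAM = auto()
    SHARED = auto()

def pick_mps_vram_state(set_vram_to: VRAMState) -> VRAMState:
    # Honor an explicit low/no-VRAM request; otherwise keep the
    # shared-memory default that MPS always used before this change.
    if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM):
        return set_vram_to
    return VRAMState.SHARED

assert pick_mps_vram_state(VRAMState.LOW_VRAM) is VRAMState.LOW_VRAM
assert pick_mps_vram_state(VRAMState.NORMAL_VRAM) is VRAMState.SHARED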
@@ -1771,7 +1774,9 @@ def lora_compute_dtype(device):
 def synchronize():
     if cpu_mode():
         return
-    if is_intel_xpu():
+    if mps_mode():
+        torch.mps.synchronize()
+    elif is_intel_xpu():
         torch.xpu.synchronize()
     elif torch.cuda.is_available():
         torch.cuda.synchronize()
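
With this change, synchronize() also drains the MPS command queue. A typical reason to call it is timing: PyTorch dispatches device kernels asynchronously, so a host-side timer must wait for queued work before stopping. A usage sketch (the function below is illustrative, not part of the patch, and assumes the named device actually exists):

import time
import torch

def timed_matmul(device: str, n: int = 1024) -> float:
    # Without the synchronize() call the timer would stop while the
    # asynchronously queued matmul is still running on the device.
    x = torch.randn(n, n, device=device)
    start = time.perf_counter()
    _ = x @ x
    if device == "mps":
        torch.mps.synchronize()
    elif device == "cuda":
        torch.cuda.synchronize()
    return time.perf_counter() - start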
@@ -1781,6 +1786,7 @@ def soft_empty_cache(force=False):
         return
     global cpu_state
     if cpu_state == CPUState.MPS:
+        torch.mps.synchronize()
         torch.mps.empty_cache()
     elif is_intel_xpu():
         torch.xpu.empty_cache()
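
In soft_empty_cache() the queue is now drained before torch.mps.empty_cache(), presumably so no in-flight kernel still references the cached blocks being released. The same two-step pattern in isolation, using only public torch.mps calls (the wrapper and its reporting are illustrative):

import torch

def mps_release_cached_memory() -> None:
    if not torch.backends.mps.is_available():
        return
    before = torch.mps.driver_allocated_memory()  # bytes held by the Metal driver
    torch.mps.synchronize()  # finish queued work first
    torch.mps.empty_cache()  # then release cached allocator blocks
    after = torch.mps.driver_allocated_memory()
    print(f"driver-allocated memory: {before} -> {after} bytes")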

Changed file 2 of 2 (the PromptExecutor class suggests ComfyUI's execution.py):

@@ -780,6 +780,11 @@ class PromptExecutor:
             if self.cache_type == CacheType.RAM_PRESSURE:
                 comfy.model_management.free_memory(0, None, pins_required=ram_headroom, ram_required=ram_headroom)
                 comfy.memory_management.extra_ram_release(ram_headroom)
+            elif comfy.model_management.mps_mode():
+                mem_free_total, mem_free_torch = comfy.model_management.get_free_memory(
+                    comfy.model_management.get_torch_device(), torch_free_too=True)
+                if mem_free_torch < mem_free_total * 0.25:
+                    comfy.model_management.soft_empty_cache()
         else:
             # Only execute when the while-loop ends without break
             # Send cached UI for intermediate output nodes that weren't executed
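
On the executor side the flush is gated rather than unconditional: get_free_memory(..., torch_free_too=True) reports both the overall free figure and the part that torch's allocator can reuse, and soft_empty_cache() runs only when the torch-reusable part falls below a quarter of the total, so the cost of a flush is apparently not paid on every prompt. The gate in isolation (a hypothetical helper; only the 0.25 threshold and the two inputs come from the hunk):

def should_soft_empty_cache(mem_free_total: int, mem_free_torch: int,
                            threshold: float = 0.25) -> bool:
    # Hypothetical helper mirroring the gate above: flush the device cache
    # only when the share of free memory that torch itself can reuse has
    # dropped below `threshold` of the overall free figure.
    return mem_free_torch < mem_free_total * threshold

assert should_soft_empty_cache(mem_free_total=16 << 30, mem_free_torch=2 << 30)
assert not should_soft_empty_cache(mem_free_total=16 << 30, mem_free_torch=8 << 30)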