From 0264149f4671649f5854f6669ecc538618a7c388 Mon Sep 17 00:00:00 2001 From: KBRASK Date: Sun, 20 Apr 2025 15:47:18 +0800 Subject: [PATCH] Replace torch.cuda.empty_cache() with model_management.soft_empty_cache() to enable support for non-CUDA devices. --- comfy/ldm/modules/diffusionmodules/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index c59b03725..0b2d92e94 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -721,13 +721,13 @@ class Decoder(nn.Module): if len(self.up[i_level].attn) > 0: h_new = self.up[i_level].attn[i_block](h_new, **kwargs) del h - torch.cuda.empty_cache() + model_management.soft_empty_cache() h = h_new if i_level != 0: h_new = self.up[i_level].upsample(h) del h - torch.cuda.empty_cache() + model_management.soft_empty_cache() h = h_new # end