To enable support for non-CUDA devices, replace torch.cuda.empty_cache() with model_management.soft_empty_cache().

This commit is contained in:
KBRASK 2025-04-20 15:47:18 +08:00
parent 4d4e6a05fa
commit 0264149f46

View File

@ -721,13 +721,13 @@ class Decoder(nn.Module):
if len(self.up[i_level].attn) > 0:
h_new = self.up[i_level].attn[i_block](h_new, **kwargs)
del h
torch.cuda.empty_cache()
model_management.soft_empty_cache()
h = h_new
if i_level != 0:
h_new = self.up[i_level].upsample(h)
del h
torch.cuda.empty_cache()
model_management.soft_empty_cache()
h = h_new
# end