diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index a68cb8439..37b2a8ee3 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -336,6 +336,10 @@ def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
         steps = 1
 
+        if mem_free_total <= 0:
+            # DirectML doesn't expose free VRAM — assume 4GB free as a safe fallback for 6GB cards
+            mem_free_total = 4 * (1024 ** 3)
+
         if mem_required > mem_free_total:
             steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
             # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 6b4d4b770..a14627842 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -544,7 +544,11 @@ def module_mmap_residency(module, free=False):
     for k in sd:
         t = sd[k]
         module_mem += t.nbytes
-        storage = t._qdata.untyped_storage() if isinstance(t, comfy.quant_ops.QuantizedTensor) else t.untyped_storage()
+        try:
+            storage = t._qdata.untyped_storage() if isinstance(t, comfy.quant_ops.QuantizedTensor) else t.untyped_storage()
+        except NotImplementedError:
+            # DirectML (AMD) tensors are opaque — no host storage to inspect; skip mmap tracking
+            continue
         if not getattr(storage, "_comfy_tensor_mmap_touched", False):
             continue
         mmap_touched_mem += t.nbytes
@@ -1328,7 +1332,12 @@ def cast_to_gathered(tensors, r, non_blocking=False, stream=None):
             continue
         if comfy.memory_management.read_tensor_file_slice_into(tensor, dest_view):
             continue
-        storage = tensor._qdata.untyped_storage() if isinstance(tensor, comfy.quant_ops.QuantizedTensor) else tensor.untyped_storage()
+        try:
+            storage = tensor._qdata.untyped_storage() if isinstance(tensor, comfy.quant_ops.QuantizedTensor) else tensor.untyped_storage()
+        except NotImplementedError:
+            # DirectML tensors are opaque — skip mmap marking, just copy
+            dest_view.copy_(tensor, non_blocking=non_blocking)
+            continue
         if hasattr(storage, "_comfy_tensor_mmap_touched"):
             storage._comfy_tensor_mmap_touched = True
         dest_view.copy_(tensor, non_blocking=non_blocking)