mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-04-14 04:22:31 +08:00
Add a supports_fp64 function. (#13368)
This commit is contained in:
parent
a2840e7552
commit
55ebd287ee
@ -16,7 +16,7 @@ def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None, transforme
|
|||||||
|
|
||||||
def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
|
def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
|
||||||
assert dim % 2 == 0
|
assert dim % 2 == 0
|
||||||
if comfy.model_management.is_device_mps(pos.device) or comfy.model_management.is_intel_xpu() or comfy.model_management.is_directml_enabled():
|
if not comfy.model_management.supports_fp64(pos.device):
|
||||||
device = torch.device("cpu")
|
device = torch.device("cpu")
|
||||||
else:
|
else:
|
||||||
device = pos.device
|
device = pos.device
|
||||||
|
|||||||
@ -1732,6 +1732,21 @@ def supports_mxfp8_compute(device=None):
|
|||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
def supports_fp64(device=None):
    """Return whether *device* can run fp64 (double precision) compute.

    Several accelerator backends lack native float64 support, so callers
    (e.g. RoPE position embedding) fall back to computing on the CPU when
    this returns False.
    """
    # Short-circuit through the known fp64-incapable backends:
    # Apple MPS, Intel XPU, DirectML, and iXUCA.
    lacks_fp64 = (
        is_device_mps(device)
        or is_intel_xpu()
        or is_directml_enabled()
        or is_ixuca()
    )
    return not lacks_fp64
|
||||||
|
|
||||||
def extended_fp16_support():
|
def extended_fp16_support():
|
||||||
# TODO: check why some models work with fp16 on newer torch versions but not on older
|
# TODO: check why some models work with fp16 on newer torch versions but not on older
|
||||||
if torch_version_numeric < (2, 7):
|
if torch_version_numeric < (2, 7):
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user