Compare commits

...

3 Commits

Author        SHA1        Message                                                        Date
Silver        e42e29579b  Merge 6fd6ffd023 into dd86b15521                               2026-02-02 10:13:21 +01:00
Silver        6fd6ffd023  Merge branch 'comfyanonymous:master' into fp8compute_disable  2025-04-10 18:16:20 +02:00
silveroxides  a6b22bd779  Add launch argument for disabling fp8 compute                  2025-04-08 19:27:56 +02:00
2 changed files with 3 additions and 0 deletions

View File

@@ -151,6 +151,7 @@ parser.add_argument("--force-non-blocking", action="store_true", help="Force Com
 parser.add_argument("--default-hashing-function", type=str, choices=['md5', 'sha1', 'sha256', 'sha512'], default='sha256', help="Allows you to choose the hash function to use for duplicate filename / contents comparison. Default is sha256.")
+parser.add_argument("--disable-fp8-compute", action="store_true", help="Prevent ComfyUI from activating fp8 compute in Nvidia cards that support it. Can prevent some issues with some models not suitable for fp8 compute.")
 parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.")
 parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. Note that this might not make images deterministic in all cases.")

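For context, the new option is a standard argparse store_true flag: it defaults to False and becomes True only when the flag is passed at launch (for example, something like python main.py --disable-fp8-compute, assuming ComfyUI's usual entry point). Below is a minimal, self-contained sketch of just this flag, separate from ComfyUI's real argument parser, to illustrate the behavior:

import argparse

# Hypothetical standalone reproduction of the new option for illustration only;
# ComfyUI's actual parser defines many more arguments around it.
parser = argparse.ArgumentParser()
parser.add_argument("--disable-fp8-compute", action="store_true",
                    help="Prevent fp8 compute from being activated.")

print(parser.parse_args([]).disable_fp8_compute)                         # False (default)
print(parser.parse_args(["--disable-fp8-compute"]).disable_fp8_compute)  # True

The second hunk below then checks this value inside supports_fp8_compute(), so passing the flag short-circuits the capability check before any GPU properties are queried.
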
View File

@@ -1639,6 +1639,8 @@ def supports_fp8_compute(device=None):
     if not is_nvidia():
         return False
+    if args.disable_fp8_compute:
+        return False
     props = torch.cuda.get_device_properties(device)
     if props.major >= 9: