diff --git a/comfy/model_management.py b/comfy/model_management.py
index 449295a1a..be5f7765e 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -1122,7 +1122,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
 
     bf16_works = torch.cuda.is_bf16_supported()
 
-    if bf16_works or manual_cast:
+    if bf16_works and manual_cast:
         free_model_memory = maximum_vram_for_weights(device)
         if (not prioritize_performance) or model_params * 4 > free_model_memory:
             return True
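
Note on the change: the guard flips from "or" to "and", so the memory-based bf16 decision is now reached only when the device natively supports bf16 AND manual casting is requested, rather than when either condition holds. A minimal sketch follows (not part of the patch; the VRAM check and the surrounding function are stubbed out, since only the boolean guard differs) that contrasts the two guards over all flag combinations:

# Minimal sketch, not part of the patch: isolates the boolean guard that
# the diff changes and prints both variants for every input combination.

def old_guard(bf16_works, manual_cast):
    return bf16_works or manual_cast     # before the patch

def new_guard(bf16_works, manual_cast):
    return bf16_works and manual_cast    # after the patch

for bf16_works in (False, True):
    for manual_cast in (False, True):
        print(f"bf16_works={bf16_works!s:5} manual_cast={manual_cast!s:5} "
              f"old={old_guard(bf16_works, manual_cast)} "
              f"new={new_guard(bf16_works, manual_cast)}")

The only inputs where behavior changes are (bf16_works=True, manual_cast=False) and (bf16_works=False, manual_cast=True): under the old guard both still proceeded to the free_model_memory check, while under the new guard neither does.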