From 73e87509dc93652277461450f87ce09f71388c21 Mon Sep 17 00:00:00 2001
From: Soulreaver90
Date: Wed, 11 Feb 2026 17:23:39 -0500
Subject: [PATCH] Minimum viable dtype for lora math is bf16

---
 comfy/model_patcher.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index f278fccac..81baf85c0 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -122,6 +122,11 @@ class LowVramPatch:
         self.set_func = set_func
 
     def __call__(self, weight):
+        if weight.dtype == torch.float8_e4m3fn or weight.dtype == torch.float8_e5m2:
+            temp_weight = weight.to(torch.bfloat16)
+            patched_weight = comfy.lora.calculate_weight(self.patches[self.key], temp_weight, self.key, intermediate_dtype=torch.bfloat16)
+            return patched_weight.to(weight.dtype)
+
         return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=weight.dtype)
 
 LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR = 2