From 7985ff88b9a7099378b5f2026bee5da63d3fc53f Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 26 Aug 2024 12:33:57 -0400
Subject: [PATCH 1/2] Use less memory in float8 lora patching by doing calculations in fp16.

---
 comfy/float.py | 45 +++++++++++++++++++++++----------------------
 1 file changed, 23 insertions(+), 22 deletions(-)

diff --git a/comfy/float.py b/comfy/float.py
index 1dbdafd59..eb4a9b26e 100644
--- a/comfy/float.py
+++ b/comfy/float.py
@@ -1,4 +1,15 @@
 import torch
+import math
+
+def calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS):
+    mantissa_scaled = torch.where(
+        normal_mask,
+        (abs_x / (2.0 ** (exponent - EXPONENT_BIAS)) - 1.0) * (2**MANTISSA_BITS),
+        (abs_x / (2.0 ** (-EXPONENT_BIAS + 1 - MANTISSA_BITS)))
+    )
+
+    mantissa_scaled += torch.rand_like(mantissa_scaled)
+    return mantissa_scaled.floor() / (2**MANTISSA_BITS)

 #Not 100% sure about this
 def manual_stochastic_round_to_float8(x, dtype):
@@ -9,40 +20,30 @@ def manual_stochastic_round_to_float8(x, dtype):
     else:
         raise ValueError("Unsupported dtype")

+    x = x.half()
     sign = torch.sign(x)
     abs_x = x.abs()
+    sign = torch.where(abs_x == 0, 0, sign)

     # Combine exponent calculation and clamping
     exponent = torch.clamp(
-        torch.floor(torch.log2(abs_x)).to(torch.int32) + EXPONENT_BIAS,
+        torch.floor(torch.log2(abs_x)) + EXPONENT_BIAS,
         0, 2**EXPONENT_BITS - 1
     )

     # Combine mantissa calculation and rounding
-    # min_normal = 2.0 ** (-EXPONENT_BIAS + 1)
-    # zero_mask = (abs_x == 0)
-    # subnormal_mask = (exponent == 0) & (abs_x != 0)
     normal_mask = ~(exponent == 0)

-    mantissa_scaled = torch.where(
-        normal_mask,
-        (abs_x / (2.0 ** (exponent - EXPONENT_BIAS)) - 1.0) * (2**MANTISSA_BITS),
-        (abs_x / (2.0 ** (-EXPONENT_BIAS + 1 - MANTISSA_BITS)))
-    )
-    mantissa_floor = mantissa_scaled.floor()
-    mantissa = torch.where(
-        torch.rand_like(mantissa_scaled) < (mantissa_scaled - mantissa_floor),
-        (mantissa_floor + 1) / (2**MANTISSA_BITS),
-        mantissa_floor / (2**MANTISSA_BITS)
-    )
-    result = torch.where(
-        normal_mask,
-        sign * (2.0 ** (exponent - EXPONENT_BIAS)) * (1.0 + mantissa),
-        sign * (2.0 ** (-EXPONENT_BIAS + 1)) * mantissa
-    )
+    abs_x[:] = calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS)

-    result = torch.where(abs_x == 0, 0, result)
-    return result.to(dtype=dtype)
+    sign *= torch.where(
+        normal_mask,
+        (2.0 ** (exponent - EXPONENT_BIAS)) * (1.0 + abs_x),
+        (2.0 ** (-EXPONENT_BIAS + 1)) * abs_x
+    )
+    del abs_x
+
+    return sign.to(dtype=dtype)
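A quick way to sanity-check what the first patch does is to push a value that float8 cannot represent exactly through the new fp16 code path and compare it against PyTorch's built-in round-to-nearest cast. The sketch below is illustrative and not part of the patch series: it assumes a ComfyUI checkout is importable, a PyTorch build that ships the float8 dtypes, and a device whose half-precision ops cover this path; the tensor size and the 0.3 test value are arbitrary.

# Illustrative sketch (not from the patch): compare the fp16-based stochastic
# rounding in comfy/float.py against a plain round-to-nearest cast.
import torch
import comfy.float

device = "cuda" if torch.cuda.is_available() else "cpu"  # the fp16 math targets GPU use
x = torch.full((1_000_000,), 0.3, device=device)  # 0.3 is not exactly representable in float8_e4m3fn

sr = comfy.float.manual_stochastic_round_to_float8(x, torch.float8_e4m3fn)
rn = x.to(torch.float8_e4m3fn)  # built-in round-to-nearest cast for comparison

# Round-to-nearest maps every element to the same neighbour, while stochastic
# rounding dithers between the two representable neighbours (0.28125 and 0.3125)
# so that the average stays close to the original value.
print("round-to-nearest mean:", rn.float().mean().item())
print("stochastic mean:      ", sr.float().mean().item())

Doing the intermediate math in fp16 rather than fp32 roughly halves the size of the temporaries (sign, abs_x, exponent, the random dither), which is where the memory saving named in the commit message comes from.
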
From 2ca8f6e23d41a8e4cf064ca9f2ebe5c768e155dc Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Mon, 26 Aug 2024 15:12:06 -0400
Subject: [PATCH 2/2] Make the stochastic fp8 rounding reproducible.

---
 comfy/float.py         | 14 ++++++++------
 comfy/model_patcher.py | 14 +++++++++++++-
 2 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/comfy/float.py b/comfy/float.py
index eb4a9b26e..57fd07099 100644
--- a/comfy/float.py
+++ b/comfy/float.py
@@ -1,18 +1,18 @@
 import torch
 import math

-def calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS):
+def calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS, generator=None):
     mantissa_scaled = torch.where(
         normal_mask,
         (abs_x / (2.0 ** (exponent - EXPONENT_BIAS)) - 1.0) * (2**MANTISSA_BITS),
         (abs_x / (2.0 ** (-EXPONENT_BIAS + 1 - MANTISSA_BITS)))
     )

-    mantissa_scaled += torch.rand_like(mantissa_scaled)
+    mantissa_scaled += torch.rand(mantissa_scaled.size(), dtype=mantissa_scaled.dtype, layout=mantissa_scaled.layout, device=mantissa_scaled.device, generator=generator)
     return mantissa_scaled.floor() / (2**MANTISSA_BITS)

 #Not 100% sure about this
-def manual_stochastic_round_to_float8(x, dtype):
+def manual_stochastic_round_to_float8(x, dtype, generator=None):
     if dtype == torch.float8_e4m3fn:
         EXPONENT_BITS, MANTISSA_BITS, EXPONENT_BIAS = 4, 3, 7
     elif dtype == torch.float8_e5m2:
@@ -34,7 +34,7 @@ def manual_stochastic_round_to_float8(x, dtype):
     # Combine mantissa calculation and rounding
     normal_mask = ~(exponent == 0)

-    abs_x[:] = calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS)
+    abs_x[:] = calc_mantissa(abs_x, exponent, normal_mask, MANTISSA_BITS, EXPONENT_BIAS, generator=generator)

     sign *= torch.where(
         normal_mask,
@@ -47,7 +47,7 @@
-def stochastic_rounding(value, dtype):
+def stochastic_rounding(value, dtype, seed=0):
     if dtype == torch.float32:
         return value.to(dtype=torch.float32)
     if dtype == torch.float16:
@@ -55,6 +55,8 @@
     if dtype == torch.bfloat16:
         return value.to(dtype=torch.bfloat16)
     if dtype == torch.float8_e4m3fn or dtype == torch.float8_e5m2:
-        return manual_stochastic_round_to_float8(value, dtype)
+        generator = torch.Generator(device=value.device)
+        generator.manual_seed(seed)
+        return manual_stochastic_round_to_float8(value, dtype, generator=generator)

     return value.to(dtype=dtype)

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 59c505413..3f5d90273 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -30,6 +30,18 @@
 import comfy.model_management
 import comfy.lora
 from comfy.types import UnetWrapperFunction

+def string_to_seed(data):
+    crc = 0xFFFFFFFF
+    for byte in data:
+        if isinstance(byte, str):
+            byte = ord(byte)
+        crc ^= byte
+        for _ in range(8):
+            if crc & 1:
+                crc = (crc >> 1) ^ 0xEDB88320
+            else:
+                crc >>= 1
+    return crc ^ 0xFFFFFFFF
 def set_model_options_patch_replace(model_options, patch, name, block_name, number, transformer_index=None):
     to = model_options["transformer_options"].copy()
@@ -309,7 +321,7 @@ class ModelPatcher:
             else:
                 temp_weight = weight.to(torch.float32, copy=True)
             out_weight = comfy.lora.calculate_weight(self.patches[key], temp_weight, key)
-            out_weight = comfy.float.stochastic_rounding(out_weight, weight.dtype)
+            out_weight = comfy.float.stochastic_rounding(out_weight, weight.dtype, seed=string_to_seed(key))
             if inplace_update:
                 comfy.utils.copy_to_param(self.model, key, out_weight)
             else:
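The second patch makes that dither deterministic: the random draw now comes from a torch.Generator seeded per weight, with the seed derived from the weight's key by the CRC-32-style string_to_seed helper. Because the seed is a pure function of the key, re-patching the same model on the same setup produces identical fp8 weights regardless of the order in which keys are processed. A rough, illustrative check follows (not part of the patches; ComfyUI is assumed importable and the weight key below is made up):

# Illustrative sketch (not from the patch): the same key reproduces the same
# fp8 bits, while a different key reseeds the generator.
import torch
import comfy.float
from comfy.model_patcher import string_to_seed

device = "cuda" if torch.cuda.is_available() else "cpu"
key = "diffusion_model.output_blocks.0.1.weight"  # hypothetical weight key
w = torch.randn(512, 512, device=device)

a = comfy.float.stochastic_rounding(w, torch.float8_e4m3fn, seed=string_to_seed(key))
b = comfy.float.stochastic_rounding(w, torch.float8_e4m3fn, seed=string_to_seed(key))
c = comfy.float.stochastic_rounding(w, torch.float8_e4m3fn, seed=string_to_seed(key + ".alt"))

# Comparing raw bytes avoids relying on float8 comparison support.
print(torch.equal(a.view(torch.uint8), b.view(torch.uint8)))  # True: same seed, same bits
print(torch.equal(a.view(torch.uint8), c.view(torch.uint8)))  # almost certainly False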