From b7143b74ce6df191004fab4a683cb2792c82248b Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 26 Nov 2024 01:33:01 -0500
Subject: [PATCH 1/2] Flux inpaint model does not work in fp16.

---
 comfy/supported_models.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index caa1a1846..b426d3085 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -659,6 +659,15 @@ class Flux(supported_models_base.BASE):
         t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
         return supported_models_base.ClipTarget(comfy.text_encoders.flux.FluxTokenizer, comfy.text_encoders.flux.flux_clip(**t5_detect))
 
+class FluxInpaint(Flux):
+    unet_config = {
+        "image_model": "flux",
+        "guidance_embed": True,
+        "in_channels": 96,
+    }
+
+    supported_inference_dtypes = [torch.bfloat16, torch.float32]
+
 class FluxSchnell(Flux):
     unet_config = {
         "image_model": "flux",
@@ -731,6 +740,6 @@ class LTXV(supported_models_base.BASE):
         t5_detect = comfy.text_encoders.sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
         return supported_models_base.ClipTarget(comfy.text_encoders.lt.LTXVT5Tokenizer, comfy.text_encoders.lt.ltxv_te(**t5_detect))
 
-models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, HunyuanDiT, HunyuanDiT1, Flux, FluxSchnell, GenmoMochi, LTXV]
+models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV]
 
 models += [SVD_img2vid]

From 15c39ea75774b5d36065eb8caa5ecbbfcd168b71 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 26 Nov 2024 03:34:36 -0500
Subject: [PATCH 2/2] Support for the official mochi lora format.

---
 comfy/lora.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/comfy/lora.py b/comfy/lora.py
index 18602f24c..1080169b1 100644
--- a/comfy/lora.py
+++ b/comfy/lora.py
@@ -62,6 +62,7 @@ def load_lora(lora, to_load):
         diffusers_lora = "{}_lora.up.weight".format(x)
         diffusers2_lora = "{}.lora_B.weight".format(x)
         diffusers3_lora = "{}.lora.up.weight".format(x)
+        mochi_lora = "{}.lora_B".format(x)
         transformers_lora = "{}.lora_linear_layer.up.weight".format(x)
         A_name = None
 
@@ -81,6 +82,10 @@ def load_lora(lora, to_load):
             A_name = diffusers3_lora
             B_name = "{}.lora.down.weight".format(x)
             mid_name = None
+        elif mochi_lora in lora.keys():
+            A_name = mochi_lora
+            B_name = "{}.lora_A".format(x)
+            mid_name = None
         elif transformers_lora in lora.keys():
             A_name = transformers_lora
             B_name ="{}.lora_linear_layer.down.weight".format(x)
@@ -362,6 +367,12 @@ def model_lora_keys_unet(model, key_map={}):
             key_map["lycoris_{}".format(k[:-len(".weight")].replace(".", "_"))] = to #simpletrainer lycoris
             key_map["lora_transformer_{}".format(k[:-len(".weight")].replace(".", "_"))] = to #onetrainer
 
+    if isinstance(model, comfy.model_base.GenmoMochi):
+        for k in sdk:
+            if k.startswith("diffusion_model.") and k.endswith(".weight"): #Official Mochi lora format
+                key_lora = k[len("diffusion_model."):-len(".weight")]
+                key_map["{}".format(key_lora)] = k
+
     return key_map
 
 