From d3f5998218ee488dd64fc63c8d095850cccab2b9 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 9 Jul 2023 09:33:53 -0400
Subject: [PATCH 1/2] Support loading clip_g from diffusers in CLIP Loader nodes.

---
 comfy/sdxl_clip.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py
index f676d8c81..d9298b205 100644
--- a/comfy/sdxl_clip.py
+++ b/comfy/sdxl_clip.py
@@ -35,6 +35,8 @@ class SDXLClipG(sd1_clip.SD1ClipModel):
     def load_sd(self, sd):
         if "text_projection" in sd:
             self.text_projection[:] = sd.pop("text_projection")
+        if "text_projection.weight" in sd:
+            self.text_projection[:] = sd.pop("text_projection.weight").transpose(0, 1)
         return super().load_sd(sd)
 
 class SDXLClipGTokenizer(sd1_clip.SD1Tokenizer):

From 0ae81c03bb035d3db46b86d48227eed3998872ba Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Sun, 9 Jul 2023 09:56:03 -0400
Subject: [PATCH 2/2] Empty cache after model unloading for normal vram and lower.

---
 comfy/model_management.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 09dcaa295..e148408b8 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -238,6 +238,8 @@ def unload_model():
         current_loaded_model.model_patches_to(current_loaded_model.offload_device)
         current_loaded_model.unpatch_model()
         current_loaded_model = None
+    if vram_state != VRAMState.HIGH_VRAM:
+        soft_empty_cache()
 
     if vram_state != VRAMState.HIGH_VRAM:
         if len(current_gpu_controlnets) > 0:
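
Note on PATCH 1/2: diffusers checkpoints export the CLIP-G text projection
as an nn.Linear weight named "text_projection.weight", shaped
(out_features, in_features), while this codebase keeps a plain matrix that
is right-multiplied onto the hidden states, hence the transpose(0, 1). A
minimal sketch of the shape equivalence, assuming a bias-free 1280x1280
projection (the concrete sizes are illustrative, not taken from the patch):

    # nn.Linear computes x @ weight.T, so transposing the exported weight
    # yields a matrix usable directly as x @ proj.
    import torch

    hidden = torch.randn(1, 77, 1280)                 # example CLIP-G hidden states
    linear = torch.nn.Linear(1280, 1280, bias=False)  # diffusers-style projection

    proj = linear.weight.transpose(0, 1)              # (out, in) -> (in, out), as in the patch
    assert torch.allclose(linear(hidden), hidden @ proj, atol=1e-6)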
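
Note on PATCH 2/2: after the model is unpatched and dropped, PyTorch's
caching allocator may still hold the freed blocks, so the VRAM is not
returned to the system until the cache is emptied; the HIGH_VRAM check
skips that cost when models are expected to stay resident. The body of
soft_empty_cache() is not shown in this patch; a hedged sketch of what
such a helper typically does on CUDA:

    import torch

    def soft_empty_cache_sketch():
        # Ask the caching allocator to hand unused cached blocks back to
        # the driver so other processes can use the freed VRAM.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()  # also reclaim memory tied to dead IPC handles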