diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 9b2074501..0c54f7f47 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -565,7 +565,10 @@ class BasicTransformerBlock(nn.Module):
             for p in patch:
                 n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)
 
-        transformer_block = (block[0], block[1], block_index)
+        if block is not None:
+            transformer_block = (block[0], block[1], block_index)
+        else:
+            transformer_block = None
         attn1_replace_patch = transformer_patches_replace.get("attn1", {})
         block_attn1 = transformer_block
         if block_attn1 not in attn1_replace_patch:
diff --git a/comfy/sd.py b/comfy/sd.py
index 74c144ba0..6feb0de43 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -961,12 +961,19 @@ def load_style_model(ckpt_path):
 
 def load_clip(ckpt_path, embedding_directory=None):
     clip_data = utils.load_torch_file(ckpt_path, safe_load=True)
-    config = {}
+    class EmptyClass:
+        pass
+
+    clip_target = EmptyClass()
+    clip_target.params = {}
     if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
-        config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder'
+        clip_target.clip = sd2_clip.SD2ClipModel
+        clip_target.tokenizer = sd2_clip.SD2Tokenizer
     else:
-        config['target'] = 'comfy.ldm.modules.encoders.modules.FrozenCLIPEmbedder'
-    clip = CLIP(config=config, embedding_directory=embedding_directory)
+        clip_target.clip = sd1_clip.SD1ClipModel
+        clip_target.tokenizer = sd1_clip.SD1Tokenizer
+
+    clip = CLIP(clip_target, embedding_directory=embedding_directory)
     clip.load_from_state_dict(clip_data)
     return clip
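
For readability, here is a sketch of how comfy/sd.py's load_clip reads once the second hunk is applied. It is reconstructed purely from the diff above; the module-level imports of utils, sd1_clip, sd2_clip and the CLIP class are assumed to already be present in comfy/sd.py.

    def load_clip(ckpt_path, embedding_directory=None):
        clip_data = utils.load_torch_file(ckpt_path, safe_load=True)

        # Lightweight stand-in for a config object; the patch attaches the
        # chosen model/tokenizer classes to it instead of an import-path string.
        class EmptyClass:
            pass

        clip_target = EmptyClass()
        clip_target.params = {}
        # Layer index 22 only exists in the larger SD2/OpenCLIP text encoder,
        # so its presence identifies an SD2-style CLIP checkpoint.
        if "text_model.encoder.layers.22.mlp.fc1.weight" in clip_data:
            clip_target.clip = sd2_clip.SD2ClipModel
            clip_target.tokenizer = sd2_clip.SD2Tokenizer
        else:
            clip_target.clip = sd1_clip.SD1ClipModel
            clip_target.tokenizer = sd1_clip.SD1Tokenizer

        clip = CLIP(clip_target, embedding_directory=embedding_directory)
        clip.load_from_state_dict(clip_data)
        return clip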