Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-14 07:22:36 +08:00)
added clap tokenizer

commit 2ceb9f0fdc (parent 8311b156ad)
clap_tokenizer/merges.txt (new file, 50001 lines)
File diff suppressed because it is too large.
clap_tokenizer/special_tokens_map.json (new file, 51 lines)
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
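These are the standard RoBERTa-style special tokens, so prompts are expected to be wrapped as <s> ... </s>, with <pad> used for padding and a mask token that strips a leading space. A minimal sketch of reading the map back, assuming the clap_tokenizer directory from this commit sits next to the script:

import json
from pathlib import Path

# Assumed location of the directory added by this commit; adjust as needed.
tokenizer_dir = Path("clap_tokenizer")

with open(tokenizer_dir / "special_tokens_map.json", encoding="utf-8") as f:
    special_tokens = json.load(f)

# Each entry is an AddedToken-style dict; "content" holds the literal token string.
for name, tok in special_tokens.items():
    print(f"{name}: {tok['content']} (lstrip={tok['lstrip']})")
# bos_token/cls_token resolve to <s>, eos_token/sep_token to </s>,
# and mask_token is <mask> with lstrip set to true.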
clap_tokenizer/tokenizer.json (new file, 250364 lines)
File diff suppressed because it is too large.
clap_tokenizer/tokenizer_config.json (new file, 63 lines)
@@ -0,0 +1,63 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50264": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "errors": "replace",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "max_length": null,
  "model_max_length": 512,
  "pad_to_multiple_of": null,
  "pad_token": "<pad>",
  "pad_token_type_id": 0,
  "padding_side": "right",
  "processor_class": "ClapProcessor",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
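The config declares a RobertaTokenizer with a model_max_length of 512 and the <s>/</s> wrapping defined above, which is what AutoTokenizer picks up when pointed at this directory. A minimal stand-alone sketch, assuming the transformers package is installed and the clap_tokenizer directory is available locally:

# Sketch only: requires the transformers package; the local directory path is assumed.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("clap_tokenizer")

enc = tok("a dog barking in the distance")
print(enc["input_ids"])      # wrapped with <s> (id 0) and </s> (id 2), per added_tokens_decoder
print(tok.model_max_length)  # 512, as set in tokenizer_config.json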
clap_tokenizer/vocab.json (new file, 1 line)
File diff suppressed because one or more lines are too long.
@@ -4,7 +4,7 @@ from . import utils
 
 from . import sd1_clip
 from . import sdxl_clip
-import comfy.clap_model
+import comfy.text_encoders.clap_model
 import comfy.text_encoders.sd2_clip
 import comfy.text_encoders.sd3_clip
 import comfy.text_encoders.sa_t5
@@ -1317,7 +1317,7 @@ class HunyuanFoley(supported_models_base.BASE):
     def get_model(self, state_dict, prefix="", device=None):
         return model_base.HunyuanFoley(self, device=device)
     def clip_target(self, state_dict={}):
-        return supported_models_base.ClipTarget(comfy.clap_model.ClapLargeTokenizer, comfy.clap_model.ClapTextEncoderModel)
+        return supported_models_base.ClipTarget(comfy.text_encoders.clap_model.ClapLargeTokenizer, comfy.text_encoders.clap_model.ClapTextEncoderModel)
 
 class QwenImage(supported_models_base.BASE):
     unet_config = {
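For context, supported_models_base.ClipTarget roughly pairs a tokenizer class with a text-encoder class so the loader can construct both for a model family; the change above only updates the module path of that pair. A rough stand-in for illustration, not the actual ComfyUI class:

# Illustrative stand-in only; the real class lives in comfy/supported_models_base.py
# and may carry additional state.
class ClipTarget:
    def __init__(self, tokenizer, clip):
        self.tokenizer = tokenizer  # e.g. comfy.text_encoders.clap_model.ClapLargeTokenizer
        self.clip = clip            # e.g. comfy.text_encoders.clap_model.ClapTextEncoderModel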
@@ -353,4 +353,3 @@ class ClapLargeTokenizer(sd1_clip.SDTokenizer):
     def __init__(self, embedding_directory=None, tokenizer_data={}):
         tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clap_tokenizer")
         super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2048, embedding_key='clap_l', tokenizer_class=AutoTokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, pad_token=151643, tokenizer_data=tokenizer_data)
-
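A minimal usage sketch of the tokenizer defined above, assuming a ComfyUI checkout is importable; ClapLargeTokenizer and its module path come from this commit, while tokenize_with_weights is the existing sd1_clip.SDTokenizer entry point used by the other text encoders:

# Sketch only; run from an environment where the ComfyUI package is importable.
from comfy.text_encoders.clap_model import ClapLargeTokenizer

tokenizer = ClapLargeTokenizer()
tokens = tokenizer.tokenize_with_weights("rain falling on a tin roof")
# The ids come from the bundled clap_tokenizer directory loaded via Hugging Face
# AutoTokenizer, with prompt weighting handled by the SDTokenizer base class.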