+
+ We will fill [AUDIO] with the audio features extracted by Whisper and fill [AUDIO_OUT] with the audio tokens.
+
+ Consider the following example for mixed text/audio generation:
+
+ text: <|audio_out_bos|> MASK MASK MASK MASK MASK <|audio_eos|> [text_token1]
+ audio: MASK <|audio_stream_bos|> [audio_token1] [audio_token2] [audio_token3] <|audio_stream_eos|> MASK MASK
+ token_type: 0 1 1 1 1 1 0 0
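+ Here `token_type` is 1 for the positions that carry discrete audio codes and 0 for ordinary
+ text positions (including <|audio_out_bos|> and <|audio_eos|>).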
+
+ """
+
+ _supports_cache_class = True
+ _supports_static_cache = True
+
+ def __init__(self, config: HiggsAudioConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.audio_in_token_idx = config.audio_in_token_idx
+ self.audio_out_token_idx = config.audio_out_token_idx
+ self.audio_out_bos_token_id = config.audio_out_bos_token_id if "audio_out_bos_token_id" in config else None
+ self.audio_eos_token_id = config.audio_eos_token_id if "audio_eos_token_id" in config else None
+ self.vocab_size = config.text_config.vocab_size
+ self.audio_num_codebooks = config.audio_num_codebooks
+ self.use_delay_pattern = config.use_delay_pattern
+ self.use_audio_out_embed_projector = config.use_audio_out_embed_projector
+ self.use_audio_out_self_attention = config.use_audio_out_self_attention
+
+ self.embed_tokens = nn.Embedding(self.vocab_size, config.text_config.hidden_size, self.padding_idx)
+
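+ # "dual_ffn": only the layers listed in audio_dual_ffn_layers become dual-FFN layers (extra audio
+ # FFN and, optionally, audio self-attention); the remaining layers stay plain Llama decoder layers.
+ # "dual_ffn_fast_forward": every layer is a dual-FFN layer, but layers not listed run in
+ # fast-forward mode. "stack": a plain Llama decoder stack.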
+ if config.audio_adapter_type == "dual_ffn":
+ layer_idx = 0
+ layers = []
+ for j in range(config.text_config.num_hidden_layers):
+ if j in config.audio_dual_ffn_layers:
+ layers.append(
+ HiggsAudioDualFFNDecoderLayer(
+ config, layer_idx, use_audio_attention=self.use_audio_out_self_attention
+ )
+ )
+ layer_idx += 2 if self.use_audio_out_self_attention else 1
+ else:
+ layers.append(LlamaDecoderLayer(config.text_config, layer_idx))
+ layer_idx += 1
+ self.layers = nn.ModuleList(layers)
+ elif config.audio_adapter_type == "dual_ffn_fast_forward":
+ layer_idx = 0
+ layers = []
+ for j in range(config.text_config.num_hidden_layers):
+ if j in config.audio_dual_ffn_layers:
+ layers.append(
+ HiggsAudioDualFFNDecoderLayer(
+ config,
+ layer_idx,
+ fast_forward=False,
+ use_audio_attention=self.use_audio_out_self_attention,
+ )
+ )
+ layer_idx += 2 if self.use_audio_out_self_attention else 1
+ else:
+ layers.append(
+ HiggsAudioDualFFNDecoderLayer(config, layer_idx, fast_forward=True, use_audio_attention=False)
+ )
+ layer_idx += 1
+ self.layers = nn.ModuleList(layers)
+ elif config.audio_adapter_type == "stack":
+ self.layers = nn.ModuleList(
+ [
+ LlamaDecoderLayer(config.text_config, layer_idx)
+ for layer_idx in range(config.text_config.num_hidden_layers)
+ ]
+ )
+ layer_idx = config.text_config.num_hidden_layers
+ else:
+ raise NotImplementedError(f"Audio adapter type {config.audio_adapter_type} not implemented.")
+
+ self.num_activation_checkpointing_layers = len(self.layers)
+
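+ # Maps kv-cache length -> {is_decoding_audio_token: CUDAGraphRunner} for captured decode graphs.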
+ self.decode_graph_runners = defaultdict(dict[bool, CUDAGraphRunner])
+ self.norm = RMSNorm(config.text_config.hidden_size, eps=config.text_config.rms_norm_eps)
+ self.rotary_emb = LLama3RoPE(config=config.text_config)  # was: LlamaRotaryEmbedding(config=config.text_config)
+
+ if not config.skip_audio_tower:
+ self.audio_tower = HiggsAudioEncoder(config.audio_encoder_config)
+ self.audio_encoder_proj = HiggsAudioFeatureProjector(config)
+ else:
+ self.audio_tower = None
+ self.audio_encoder_proj = None
+ self.audio_decoder_proj = HiggsAudioDecoderProjector(config, layer_idx=layer_idx)
+ self.audio_codebook_size = (
+ config.audio_codebook_size + 2
+ ) # We add 1 for the audio_stream_bos token and 1 for the audio_stream_eos token
+
+ if config.use_audio_out_embed_projector:
+ self.audio_out_embed_projector = nn.Linear(
+ config.text_config.hidden_size, config.text_config.hidden_size, bias=False
+ )
+
+ self.audio_codebook_embeddings = nn.Embedding(
+ config.audio_num_codebooks * self.audio_codebook_size, config.text_config.hidden_size
+ )
+
+ self.audio_codebook_weights = (
+ torch.ones(config.audio_num_codebooks) / config.audio_num_codebooks
+ ) # default to equal weights
+ self.post_init()
+
+ def set_num_activation_checkpointing_layers(self, num_layers):
+ self.num_activation_checkpointing_layers = num_layers
+
+ def set_delay_pattern(self):
+ self.config.use_delay_pattern = True
+ self.use_delay_pattern = True
+
+ def set_audio_special_tokens(self, tokenizer: AutoTokenizer):
+ self.audio_out_bos_token_id = tokenizer.convert_tokens_to_ids("<|audio_out_bos|>")
+ self.audio_eos_token_id = tokenizer.convert_tokens_to_ids("<|audio_eos|>")
+
+ def _embed_audio_ids(self, audio_ids):
+ """Embed the audio ids
+
+ Args:
+ audio_ids: torch.LongTensor of shape (num_codebooks, audio_in_total_length)
+
+ Returns:
+ audio_embed: torch.LongTensor of shape (audio_in_total_length, hidden_size)
+ """
+ codebook_shift = (
+ torch.arange(self.config.audio_num_codebooks, device=audio_ids.device) * self.audio_codebook_size
+ )
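+ # Each codebook owns a contiguous slice of the shared embedding table, so ids from codebook k are
+ # shifted by k * audio_codebook_size before the lookup; the per-codebook embeddings are then
+ # averaged or summed into one hidden vector per frame.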
+ audio_embed = self.audio_codebook_embeddings(audio_ids + codebook_shift.unsqueeze(-1))
+ if self.config.audio_embed_avg:
+ audio_embed = torch.mean(audio_embed, dim=0)
+ else:
+ audio_embed = torch.sum(audio_embed, dim=0)
+ if self.use_audio_out_embed_projector:
+ audio_embed = self.audio_out_embed_projector(audio_embed)
+ return audio_embed
+
+ def _apply_audio_tower(self, audio_features, audio_feature_attention_mask):
+ """Apply the audio tower to the audio features"""
+
+ if audio_features.shape[0] == 0:
+ if torch.is_grad_enabled():
+ # FIXME!!!!!!!!
+ # This is a hack to ensure that the forward+backward pass of audio_tower and audio_encoder_proj get triggered.
+ # The monkey patch won't overwrite the backward pass of nn.Module.
+ audio_outputs = _whisper_encoder_zero_shape_forward(
+ self.audio_tower, audio_features, attention_mask=None, check_seq_length=False
+ )
+ selected_audio_feature = audio_outputs.last_hidden_state
+ audio_features_embed = self.audio_encoder_proj(selected_audio_feature)
+ audio_feat_out_lengths = None
+ return audio_features_embed, audio_feat_out_lengths
+ else:
+ return None, None
+
+ audio_feat_lengths, audio_feat_out_lengths = self.audio_tower._get_feat_extract_output_lengths(
+ audio_feature_attention_mask.sum(-1)
+ )
+ batch_size, _, max_mel_seq_len = audio_features.shape
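+ # The Whisper-style encoder halves the mel sequence length with its stride-2 convolution:
+ # out_len = (in_len - 1) // 2 + 1.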
+ max_seq_len = (max_mel_seq_len - 1) // 2 + 1
+ # Create a sequence tensor of shape (batch_size, max_seq_len)
+ seq_range = (
+ torch.arange(0, max_seq_len, dtype=audio_feat_lengths.dtype, device=audio_feat_lengths.device)
+ .unsqueeze(0)
+ .expand(batch_size, max_seq_len)
+ )
+ lengths_expand = audio_feat_lengths.unsqueeze(1).expand(batch_size, max_seq_len)
+ # Create mask
+ padding_mask = seq_range < lengths_expand
+
+ if self.config._attn_implementation != "flash_attention_2":
+ audio_attention_mask = padding_mask.view(batch_size, 1, 1, max_seq_len).expand(
+ batch_size, 1, max_seq_len, max_seq_len
+ )
+ else:
+ audio_attention_mask = padding_mask
+
+ audio_outputs = self.audio_tower(audio_features, attention_mask=audio_attention_mask)
+ selected_audio_feature = audio_outputs.last_hidden_state
+ audio_features_embed = self.audio_encoder_proj(selected_audio_feature)
+
+ return audio_features_embed, audio_feat_out_lengths
+
+ def _update_causal_mask(
+ self,
+ attention_mask: torch.Tensor,
+ input_tensor: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_key_values: Cache,
+ output_attentions: bool,
+ ):
+ if self.config._attn_implementation == "flash_attention_2":
+ if attention_mask is not None and 0.0 in attention_mask:
+ return attention_mask
+ return None
+
+
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
+ # to infer the attention mask.
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ using_static_cache = isinstance(past_key_values, StaticCache)
+
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
+ attention_mask,
+ inputs_embeds=input_tensor,
+ past_key_values_length=past_seen_tokens,
+ is_training=self.training,
+ ):
+ return None
+
+ dtype, device = input_tensor.dtype, input_tensor.device
+ min_dtype = torch.finfo(dtype).min
+ sequence_length = input_tensor.shape[1]
+ if using_static_cache:
+ target_length = past_key_values.get_max_length()
+ else:
+ target_length = (
+ attention_mask.shape[-1]
+ if isinstance(attention_mask, torch.Tensor)
+ else past_seen_tokens + sequence_length + 1
+ )
+
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
+ causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
+ attention_mask,
+ sequence_length=sequence_length,
+ target_length=target_length,
+ dtype=dtype,
+ device=device,
+ min_dtype=min_dtype,
+ cache_position=cache_position,
+ batch_size=input_tensor.shape[0],
+ )
+
+ if (
+ self.config._attn_implementation == "sdpa"
+ and attention_mask is not None
+ and attention_mask.device.type == "cuda"
+ and not output_attentions
+ ):
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
+ # Details: https://github.com/pytorch/pytorch/issues/110213
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
+
+ return causal_mask
+
+ def _prepare_all_static_kv_cache_masks(self, hidden_states, attention_mask, audio_out_mask, past_key_values):
+ target_length = hidden_states.shape[1]
+ cur_pos = audio_out_mask.shape[1]
+ min_dtype = torch.finfo(hidden_states.dtype).min
+ assert len(attention_mask.shape) == 4, "Only support SDPA for now"
+ kv_cache_len = past_key_values.get_max_cache_shape()
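+ # `fast_forward_attention_mask` masks out any query/key pair that involves an audio-out position,
+ # so fast-forward (text) computation ignores audio tokens; `audio_attention_mask` does the opposite
+ # and only lets audio-out positions attend to other audio-out positions.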
+ audio_out_mask_padded = torch.nn.functional.pad(audio_out_mask, (0, kv_cache_len - cur_pos), value=True)
+ fast_forward_attention_mask = attention_mask.masked_fill(
+ audio_out_mask_padded[:, audio_out_mask.shape[1] - target_length : audio_out_mask.shape[1]].reshape(
+ audio_out_mask_padded.shape[0], 1, target_length, 1
+ )
+ | audio_out_mask_padded.reshape(audio_out_mask_padded.shape[0], 1, 1, audio_out_mask_padded.shape[1]),
+ min_dtype,
+ )
+
+ no_audio_out_mask = ~audio_out_mask
+ no_audio_out_mask = torch.nn.functional.pad(
+ no_audio_out_mask, (0, kv_cache_len - audio_out_mask.shape[1]), value=False
+ )
+ no_audio_out_mask = no_audio_out_mask[
+ :, audio_out_mask.shape[1] - target_length : audio_out_mask.shape[1]
+ ].reshape(audio_out_mask.shape[0], 1, target_length, 1) | no_audio_out_mask.reshape(
+ audio_out_mask.shape[0], 1, 1, kv_cache_len
+ )
+ audio_attention_mask = attention_mask.masked_fill(no_audio_out_mask, min_dtype)
+ return fast_forward_attention_mask, audio_attention_mask
+
+ def _forward_core(
+ self,
+ hidden_states: torch.Tensor,
+ causal_mask: torch.Tensor,
+ position_ids: torch.Tensor,
+ audio_discrete_codes_mask: torch.Tensor,
+ cache_position: torch.Tensor,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]],
+ use_cache: bool,
+ audio_attention_mask: torch.Tensor,
+ fast_forward_attention_mask: torch.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ is_decoding_audio_token: Optional[bool] = None,
+ is_using_cuda_graph: Optional[bool] = False,
+ ):
+ # create position embeddings to be shared across the decoder layers
+ # When past_key_values is passed in, we need to offset the position ids when calculating the position embeddings.
+ # Therefore, cache_position is used.
+ position_id_offset = cache_position[0] if use_cache else 0
+ position_embeddings = self.rotary_emb(hidden_states, position_ids + position_id_offset)
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if isinstance(decoder_layer, HiggsAudioDualFFNDecoderLayer):
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ audio_attention_mask=audio_attention_mask,
+ fast_forward_attention_mask=fast_forward_attention_mask,
+ position_ids=position_ids,
+ audio_out_mask=audio_discrete_codes_mask,
+ is_decoding_audio_token=is_decoding_audio_token,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ is_using_cuda_graph=is_using_cuda_graph,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ return hidden_states, all_hidden_states, all_self_attns
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.BoolTensor] = None,
+ audio_in_ids: Optional[torch.LongTensor] = None,
+ audio_in_ids_start: Optional[torch.LongTensor] = None,
+ audio_out_ids: Optional[torch.LongTensor] = None,
+ audio_out_ids_start: Optional[torch.LongTensor] = None,
+ label_ids: Optional[torch.LongTensor] = None,
+ label_audio_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_audio_hidden_states: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ cache_audio_discrete_codes_mask: Optional[torch.LongTensor] = None,
+ past_key_values_buckets: Optional[OrderedDict[int, Cache]] = None,
+ reward=None,
+ audio_features=None,
+ **kwargs,
+ ):
+
+ target_device = input_ids.device
+
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if self.config.encode_audio_in_tokens:
+ if audio_in_ids is not None and audio_in_ids.shape[-1] > 0:
+ audio_in_ids = audio_in_ids.to(target_device)
+ else:
+ audio_in_ids = torch.zeros((self.audio_num_codebooks, 0), device=target_device, dtype=torch.long)
+ audio_in_embed = self._embed_audio_ids(audio_in_ids)
+ else:
+ audio_in_embed = None
+
+ if audio_out_ids is not None and audio_out_ids.shape[-1] > 0:
+ audio_out_ids = audio_out_ids.to(target_device)
+ else:
+ audio_out_ids = torch.zeros((self.audio_num_codebooks, 0), device=target_device, dtype=torch.long)
+ audio_out_embed = self._embed_audio_ids(audio_out_ids)
+
+ round_to = 1 if use_cache else 8
+ left_padding = True if use_cache or input_ids.shape[0] == 1 else False
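+ # Expand the <|AUDIO|> / <|AUDIO_OUT|> placeholder positions in input_ids with the corresponding
+ # audio embeddings and build the masks that mark which positions hold audio.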
+ (
+ inputs_embeds,
+ attention_mask,
+ labels,
+ position_ids,
+ input_ids,
+ audio_in_mask,
+ audio_in_discrete_codes_mask,
+ audio_out_mask,
+ ) = merge_input_ids_with_audio_features(
+ audio_in_embed,
+ audio_in_ids_start,
+ audio_out_embed,
+ audio_out_ids_start,
+ self.audio_in_token_idx,
+ self.audio_out_token_idx,
+ inputs_embeds,
+ input_ids,
+ attention_mask,
+ label_ids,
+ pad_token_id=self.padding_idx,
+ round_to=round_to,
+ left_padding=left_padding,
+ )
+
+ # re-check if we use the correct kv cache bucket after
+ # the input_embeds has been merged with audio features
+ if past_key_values_buckets is not None and inputs_embeds.shape[1] > past_key_values.get_max_cache_shape():
+ past_key_values, self.current_past_key_values_bucket = self._prepare_kv_cache(
+ inputs_embeds.shape[1], None, past_key_values_buckets
+ )
+
+ if use_cache and past_key_values is None:
+ past_key_values = DynamicCache()
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+ if isinstance(past_key_values, StaticCache) and past_seen_tokens >= past_key_values.get_max_cache_shape():
+ raise ValueError(
+ f"The current sequence length ({past_seen_tokens}) exceeds "
+ f"the maximum cache shape. "
+ f"Please consider increasing the cache size."
+ )
+
+ # Use torch compile
+ use_static_cache = isinstance(past_key_values, StaticCache)
+
+ # Apply the LLM component
+ causal_mask = self._update_causal_mask(
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+ )
+
+ hidden_states = inputs_embeds
+
+ audio_discrete_codes_mask = audio_in_discrete_codes_mask | audio_out_mask
+ if cache_audio_discrete_codes_mask is not None and use_cache:
+ audio_discrete_codes_mask = torch.concat(
+ [cache_audio_discrete_codes_mask, audio_discrete_codes_mask], dim=1
+ )
+
+ # Generate the audio attention mask outside the layer to avoid recompilation
+ if use_static_cache:
+ fast_forward_attention_mask, audio_attention_mask = self._prepare_all_static_kv_cache_masks(
+ hidden_states, causal_mask, audio_discrete_codes_mask, past_key_values
+ )
+ # Set the audio out mask to the last token
+ if hidden_states.shape[1] == 1:
+ audio_discrete_codes_mask = audio_discrete_codes_mask[:, -1:]
+ audio_discrete_codes_mask = audio_discrete_codes_mask.reshape((-1, 1)).contiguous()
+ is_decoding_audio_token = audio_discrete_codes_mask.item()
+ else:
+ is_decoding_audio_token = False
+
+ if (
+ past_key_values is not None
+ and past_key_values.get_max_cache_shape() in self.decode_graph_runners
+ and (input_ids.shape[-1] == 1)
+ ):
+ _forward_core = self.decode_graph_runners[past_key_values.get_max_cache_shape()][is_decoding_audio_token]
+ is_using_cuda_graph = True
+ else:
+ _forward_core = self._forward_core
+ is_using_cuda_graph = False
+
+ hidden_states = _forward_core(
+ hidden_states=hidden_states,
+ causal_mask=causal_mask,
+ position_ids=position_ids,
+ audio_discrete_codes_mask=audio_discrete_codes_mask,
+ is_decoding_audio_token=is_decoding_audio_token if use_static_cache else None,
+ cache_position=cache_position,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ audio_attention_mask=audio_attention_mask if use_static_cache else None,
+ fast_forward_attention_mask=fast_forward_attention_mask if use_static_cache else None,
+ is_using_cuda_graph=is_using_cuda_graph,
+ output_hidden_states=False,
+ output_attentions=False,
+ )
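+ # _forward_core returns (hidden_states, all_hidden_states, all_self_attns); keep the hidden states.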
+ hidden_states = self.norm(hidden_states[0])
+
+ # Apply the audio decoder projector
+ logits, audio_logits = (
+ self.audio_decoder_proj(
+ hidden_states,
+ audio_out_mask,
+ label_audio_ids=label_audio_ids,
+ attention_mask=causal_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_audio_hidden_states=output_audio_hidden_states,
+ cache_position=cache_position,
+ )
+ )
+
+ if audio_logits is not None:
+ audio_logits = audio_logits.view(
+ audio_logits.shape[0], self.audio_num_codebooks, self.audio_codebook_size
+ ).float()
+
+ next_cache = past_key_values if use_cache else None
+
+ ret = HiggsAudioModelOutputWithPast(
+ logits=logits,
+ audio_logits=audio_logits,
+ past_key_values=next_cache,
+ audio_out_mask=audio_out_mask,
+ audio_in_discrete_codes_mask=audio_in_discrete_codes_mask,
+ )
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if not return_dict:
+ outputs = ret.to_tuple()
+ return outputs
+
+ return ret
+
+ # Overwrite GenerationMixin._update_model_kwargs_for_generation
+ def _update_model_kwargs_for_generation(
+ self,
+ outputs: ModelOutput,
+ model_kwargs: Dict[str, Any],
+ is_encoder_decoder: bool = False,
+ num_new_tokens: int = 1,
+ extend_attention_mask: bool = True,
+ ) -> Dict[str, Any]:
+ """Update the model kwargs for each step."""
+ model_kwargs["past_key_values"] = outputs.past_key_values
+
+ # update attention mask
+ if "attention_mask" in model_kwargs:
+ attention_mask = model_kwargs["attention_mask"]
+ if extend_attention_mask:
+ model_kwargs["attention_mask"] = torch.cat(
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
+ )
+ if "cache_audio_discrete_codes_mask" in model_kwargs:
+ if model_kwargs["cache_audio_discrete_codes_mask"] is None:
+ model_kwargs["cache_audio_discrete_codes_mask"] = (
+ outputs.audio_in_discrete_codes_mask | outputs.audio_out_mask
+ )
+ else:
+ model_kwargs["cache_audio_discrete_codes_mask"] = torch.concat(
+ [
+ model_kwargs["cache_audio_discrete_codes_mask"],
+ outputs.audio_in_discrete_codes_mask | outputs.audio_out_mask,
+ ],
+ 1,
+ )
+
+ return model_kwargs
+
+ def _copy_kv_cache(self, from_cache: Cache, to_cache: Cache):
+ """Copy the key-value pairs from one cache to another."""
+ num_layers = self.config.text_config.num_hidden_layers
+ if self.config.audio_dual_ffn_layers is not None:
+ num_layers += len(self.config.audio_dual_ffn_layers)
+ for layer_idx in range(num_layers):
+ from_cache_size = from_cache.get_max_cache_shape()
+ assert to_cache.get_max_cache_shape() >= from_cache_size, (
+ f"The target cache size {to_cache.get_max_cache_shape()} is smaller than the source cache size {from_cache_size}."
+ )
+ to_cache.key_cache[layer_idx][:, :, :from_cache_size, :] = from_cache.key_cache[layer_idx]
+ to_cache.value_cache[layer_idx][:, :, :from_cache_size, :] = from_cache.value_cache[layer_idx]
+
+ def _prepare_kv_cache(
+ self,
+ current_sequence_length: int,
+ current_past_key_values_bucket: Optional[int],
+ past_key_values_buckets: OrderedDict[int, Cache],
+ ) -> Tuple[Optional[Cache], Optional[int]]:
+ """Prepare the KV cache for the current sequence length."""
+ for cache_length in past_key_values_buckets.keys():
+ if cache_length >= current_sequence_length:
+ # Promote to the next KV cache bucket, copy the current KV cache bucket
+ # to the new one.
+ if current_past_key_values_bucket is not None and cache_length != current_past_key_values_bucket:
+ self._copy_kv_cache(
+ past_key_values_buckets[current_past_key_values_bucket], past_key_values_buckets[cache_length]
+ )
+
+ return past_key_values_buckets[cache_length], cache_length
+
+ raise ValueError(
+ f"The current sequence length {current_sequence_length} is larger than "
+ f"all past key values buckets {past_key_values_buckets.keys()}."
+ )
+
+ def _sample_audio_tokens(
+ self,
+ hidden_states: torch.Tensor,
+ audio_logits: torch.Tensor,
+ audio_out_ids: torch.Tensor,
+ do_sample: bool,
+ logits_processor: LogitsProcessorList,
+ device: torch.device,
+ torch_generator: Optional[torch.Generator],
+ generation_config: GenerationConfig,
+ num_delay: int,
+ num_remaining_delays: Optional[int],
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, int, Optional[int]]:
+ """Sample audio tokens and its corresponding text tokens from the logits"""
+
+ # parameters related to repetition aware sampling
+ ras_win_len = generation_config.generation_kwargs.get("ras_win_len", None)
+ ras_win_max_num_repeat = generation_config.generation_kwargs.get("ras_win_max_num_repeat", 2)
+ audio_eos_token_id = generation_config.generation_kwargs.get("audio_eos_token_id", None)
+ # In the audio generation mode, we sample from audio_logits and keep updating audio_out_ids.
+ next_audio_token_logits = audio_logits.clone()[-1, :, :].float().to(device)
+ # TopP, TopK logits processor supports empty input_ids
+ next_audio_token_scores = logits_processor(None, next_audio_token_logits)
+
+ # token selection
+ if do_sample:
+ # next_audio_token_scores has been applied top_p, top_k, and temperature.
+ probs = nn.functional.softmax(next_audio_token_scores, dim=-1)
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
+ next_audio_tokens = torch.multinomial(probs, num_samples=1, generator=torch_generator).squeeze(1)
+ else:
+ next_audio_tokens = torch.argmax(next_audio_token_scores, dim=-1)
+
+ # next_tokens: (num_codebooks, )
+ if ras_win_len is not None:
+ # check if there are repetitions over a window of tokens.
+ rep_num = (audio_out_ids[:, -ras_win_len:] == next_audio_tokens.unsqueeze(1)).sum(dim=1)
+
+ # if we saw repeated tokens in the most recent window of tokens, resample without temperature.
+ row_indices = torch.nonzero(rep_num >= ras_win_max_num_repeat).squeeze(1)
+ resampled_next_tokens = (
+ next_audio_token_logits[row_indices]
+ .softmax(dim=-1)
+ .multinomial(1, replacement=True, generator=torch_generator)
+ .squeeze(1)
+ )
+ next_audio_tokens[row_indices] = resampled_next_tokens
+
+ # Force the next text tokens to be <|AUDIO_OUT|> in audio generation mode
+ next_tokens = torch.full(
+ (audio_logits.shape[0],),
+ self.config.audio_out_token_idx,
+ dtype=torch.long,
+ device=device,
+ )
+
+ # Handle delay_pattern
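+ # With the delay pattern, codebook k starts emitting real codes k steps after codebook 0: later
+ # codebooks emit audio_stream_bos until their turn, and once a codebook emits audio_stream_eos the
+ # earlier codebooks are padded with eos while the remaining ones finish.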
+ if self.use_delay_pattern:
+ if num_delay + 1 < next_audio_tokens.shape[0]:
+ next_audio_tokens[(num_delay + 1) :] = self.config.audio_stream_bos_id
+ num_delay += 1
+ if num_remaining_delays is not None:
+ next_audio_tokens[: (self.audio_num_codebooks - num_remaining_delays)] = (
+ self.config.audio_stream_eos_id
+ )
+ num_remaining_delays -= 1
+ else:
+ all_eos_indices = (next_audio_tokens == self.config.audio_stream_eos_id).nonzero()
+ if torch.numel(all_eos_indices) > 0:
+ all_eos_indices = all_eos_indices[0]
+ last_eos_idx = all_eos_indices[-1]
+ next_audio_tokens[:last_eos_idx] = self.config.audio_stream_eos_id
+ num_remaining_delays = self.audio_num_codebooks - last_eos_idx - 1
+ if num_remaining_delays is not None and num_remaining_delays <= 0:
+ next_tokens[...] = audio_eos_token_id
+ num_delay = 0
+ num_remaining_delays = None
+
+ return (
+ next_tokens,
+ next_audio_tokens,
+ next_audio_token_logits,
+ next_audio_token_scores,
+ num_delay,
+ num_remaining_delays,
+ )
+
+ def _sample_text_tokens(
+ self,
+ logits: torch.Tensor,
+ input_ids: torch.Tensor,
+ do_sample: bool,
+ logits_processor: LogitsProcessorList,
+ device: torch.device,
+ generation_mode: GenerationMode,
+ torch_generator: Optional[torch.Generator],
+ ) -> torch.Tensor:
+ """Sample text tokens from the logits"""
+ # Clone is needed to avoid keeping a hanging ref to outputs.logits which may be very large for first iteration
+ # (the clone itself is always small)
+ next_token_logits = logits.clone()[:, -1, :].float()
+ next_token_logits = next_token_logits.to(input_ids.device)
+
+ # pre-process distribution
+ next_token_scores = logits_processor(input_ids, next_token_logits)
+
+ if generation_mode == GenerationMode.AUDIO_INIT:
+ # See the audio bos token, we should start generating audio tokens
+ next_tokens = torch.full(
+ (input_ids.shape[0],),
+ self.audio_out_token_idx,
+ dtype=torch.long,
+ device=device,
+ )
+ next_audio_tokens = torch.full(
+ (self.config.audio_num_codebooks,),
+ self.config.audio_stream_bos_id,
+ dtype=torch.long,
+ device=device,
+ )
+ else:
+ if do_sample:
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
+ # TODO (joao): this OP throws "skipping cudagraphs due to ['incompatible ops']", find solution
+ next_tokens = torch.multinomial(probs, num_samples=1, generator=torch_generator).squeeze(1)
+ else:
+ next_tokens = torch.argmax(next_token_scores, dim=-1)
+
+ next_audio_tokens = None
+
+ return next_tokens, next_audio_tokens, next_token_logits, next_token_scores
+
+ # Built on top of GenerationMixin._sample.
+ # We revise the implementation to support generating both audio / text.
+ def _sample(
+ self,
+ input_ids: torch.LongTensor,
+ logits_processor: LogitsProcessorList,
+ stopping_criteria: StoppingCriteriaList,
+ generation_config: GenerationConfig,
+ synced_gpus: bool,
+ streamer: Optional["BaseStreamer"],
+ past_key_values_buckets: Optional[OrderedDict[int, Cache]],
+ **model_kwargs,
+ ) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
+ r"""
+ Generates sequences of token ids for joint text/audio models using **multinomial sampling**.
+
+ This function may also be revised to support generating samples from HiggsAudio-like end-to-end text/audio models built on top of LLMs.
+ If the input_ids ends with <|audio_out_bos|>, we will switch to the audio-generation mode.
+
+ ```
+ ...<|start_header_id|>assistant<|end_header_id|>\n\n<|audio_out_bos|>
+ ```
+
+ Otherwise, we will keep generating the text tokens.
+
+ Parameters:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ The sequence used as a prompt for the generation.
+ logits_processor (`LogitsProcessorList`):
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
+ used to modify the prediction scores of the language modeling head applied at each generation step.
+ stopping_criteria (`StoppingCriteriaList`):
+ An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
+ used to tell if the generation loop should stop.
+ generation_config ([`~generation.GenerationConfig`]):
+ The generation configuration to be used as parametrization of the decoding method.
+ synced_gpus (`bool`):
+ Whether to continue running the while loop until max_length (needed to avoid deadlocking with
+ `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
+ streamer (`BaseStreamer`, *optional*):
+ Streamer object that will be used to stream the generated sequences. Generated tokens are passed
+ through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
+ model_kwargs:
+ Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is
+ an encoder-decoder model the kwargs should include `encoder_outputs`.
+
+ Return:
+ [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or `torch.LongTensor`:
+ A `torch.LongTensor` containing the generated tokens (default behaviour) or a
+ [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
+ `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if
+ `model.config.is_encoder_decoder=True`.
+ """
+ assert input_ids.shape[0] == 1, "Only support batch_size=1 in _sample()"
+ audio_out_bos_token_id = generation_config.generation_kwargs.get("audio_out_bos_token_id", None)
+
+ # torch generator for sampling
+ seed = generation_config.generation_kwargs.get("seed", None)
+ if seed is not None:
+ torch_generator = torch.Generator(device=input_ids.device).manual_seed(seed)
+ else:
+ torch_generator = None
+
+ # init values
+ pad_token_id = generation_config._pad_token_tensor
+ output_attentions = generation_config.output_attentions
+ output_hidden_states = generation_config.output_hidden_states
+ output_scores = generation_config.output_scores
+ output_logits = generation_config.output_logits
+ return_dict_in_generate = generation_config.return_dict_in_generate
+ max_length = generation_config.max_length
+ has_eos_stopping_criteria = any(hasattr(criteria, "eos_token_id") for criteria in stopping_criteria)
+ do_sample = generation_config.do_sample
+ # Used to track which past_key_values bucket is currently in use.
+ self.current_past_key_values_bucket = None
+
+ # init attention / hidden states / scores tuples
+ scores = () if (return_dict_in_generate and output_scores) else None
+ raw_logits = () if (return_dict_in_generate and output_logits) else None
+
+ decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
+ decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
+
+ # keep track of which sequences are already finished
+ batch_size, cur_len = input_ids.shape
+ this_peer_finished = False
+ unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device)
+ if generation_config.use_cache:
+ model_kwargs["cache_audio_discrete_codes_mask"] = None
+
+ init_model_input = True
+ num_delay = 0
+ num_remaining_delays = None
+ audio_sequences = []
+ # A tensor to keep track of all the audio placeholder tokens.
+ input_ids_full = input_ids.clone()
+
+ # Initialize the audio variables based on the input prompt.
+ if input_ids[0][-1] == self.config.audio_out_token_idx:
+ audio_sequences = [model_kwargs["audio_out_ids"][:, model_kwargs["audio_out_ids_start"][-1] :]]
+ if self.use_delay_pattern:
+ num_delay = (
+ self.audio_num_codebooks
+ - (model_kwargs["audio_out_ids"][:, -1] == self.config.audio_stream_bos_id).sum()
+ )
+ all_eos_indices = (model_kwargs["audio_out_ids"][:, -1] == self.config.audio_stream_eos_id).nonzero()
+ if torch.numel(all_eos_indices) > 0:
+ all_eos_indices = all_eos_indices[0]
+ last_eos_idx = all_eos_indices[-1]
+ num_remaining_delays = self.audio_num_codebooks - last_eos_idx - 1
+
+ while self._has_unfinished_sequences(
+ this_peer_finished, synced_gpus, device=input_ids.device, cur_len=cur_len, max_length=max_length
+ ):
+ # Check which multimodal stage we are in
+ # FIXME: Assume single input generation
+ if input_ids[0][-1] == audio_out_bos_token_id:
+ generation_mode = GenerationMode.AUDIO_INIT
+ elif input_ids[0][-1] == self.audio_out_token_idx:
+ generation_mode = GenerationMode.AUDIO_IN_PROGRESS
+ else:
+ generation_mode = GenerationMode.TEXT
+
+ is_audio_generation_mode = generation_mode == GenerationMode.AUDIO_IN_PROGRESS
+
+ if init_model_input or not generation_config.use_cache:
+ model_inputs = {"input_ids": input_ids, **model_kwargs}
+ else:
+ model_inputs = {"input_ids": input_ids[:, -1:], **model_kwargs}
+
+ if is_audio_generation_mode and generation_config.use_cache:
+ model_inputs["audio_out_ids"] = model_kwargs["audio_out_ids"][:, -1:]
+ model_inputs["audio_out_ids_start"] = torch.tensor([0], dtype=torch.long, device=input_ids.device)
+ elif not is_audio_generation_mode:
+ del model_inputs["audio_out_ids"]
+ del model_inputs["audio_out_ids_start"]
+
+ if generation_config.use_cache:
+ if "audio_features" in model_inputs and model_inputs["audio_features"] is not None:
+ model_inputs["audio_features"] = model_inputs["audio_features"][:0, ...]
+ model_inputs["audio_feature_attention_mask"] = model_inputs["audio_feature_attention_mask"][
+ :0, ...
+ ]
+
+ if "audio_in_ids" in model_inputs and model_inputs["audio_in_ids"] is not None:
+ model_inputs["audio_in_ids"] = None
+ model_inputs["audio_in_ids_start"] = None
+
+ # prepare variable output controls (note: some models won't accept all output controls)
+ model_inputs.update({"output_attentions": output_attentions} if output_attentions else {})
+ model_inputs.update({"output_hidden_states": output_hidden_states} if output_hidden_states else {})
+
+ if past_key_values_buckets is not None:
+ past_key_values, self.current_past_key_values_bucket = self._prepare_kv_cache(
+ cur_len, self.current_past_key_values_bucket, past_key_values_buckets
+ )
+ if past_key_values is not None:
+ model_inputs.update({"past_key_values": past_key_values})
+ model_inputs["past_key_values_buckets"] = past_key_values_buckets
+
+ # forward pass to get next token
+ outputs = self(**model_inputs, return_dict=True)
+
+ # Update the actual sequence length after the first forward pass
+ if init_model_input and past_key_values_buckets is not None:
+ cur_len = past_key_values_buckets[self.current_past_key_values_bucket].get_seq_length().item()
+
+ # synced_gpus: don't waste resources running the code we don't need; kwargs must be updated before skipping
+ model_kwargs = self._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder=self.config.is_encoder_decoder,
+ extend_attention_mask=True,
+ )
+
+ # After the first forward pass, we can set init_model_input to False.
+ init_model_input = False
+
+ if synced_gpus and this_peer_finished:
+ continue
+
+ if is_audio_generation_mode:
+ # In audio generation mode, we sample the audio tokens from audio logits.
+ # It might also generate the audio eos token to end the audio generation.
+ (
+ next_tokens,
+ next_audio_tokens,
+ next_audio_token_logits,
+ next_audio_token_scores,
+ num_delay,
+ num_remaining_delays,
+ ) = self._sample_audio_tokens(
+ hidden_states=outputs.audio_hidden_states,
+ audio_logits=outputs.audio_logits,
+ audio_out_ids=model_kwargs["audio_out_ids"],
+ do_sample=do_sample,
+ logits_processor=logits_processor,
+ device=input_ids.device,
+ torch_generator=torch_generator,
+ generation_config=generation_config,
+ num_delay=num_delay,
+ num_remaining_delays=num_remaining_delays,
+ )
+
+ # update generated ids, model inputs, and length for next step
+ model_kwargs["audio_out_ids"] = torch.cat(
+ [model_kwargs["audio_out_ids"], next_audio_tokens[:, None]], dim=-1
+ )
+ audio_sequences[-1] = torch.cat([audio_sequences[-1], next_audio_tokens[:, None]], dim=-1)
+
+ if streamer is not None:
+ streamer.put(next_audio_tokens.cpu())
+ else:
+ # In text generation mode, we sample the text tokens from text logits.
+ # It might also generate the audio placeholder token to start the audio generation.
+ next_tokens, next_audio_tokens, next_token_logits, next_token_scores = self._sample_text_tokens(
+ input_ids=input_ids,
+ logits=outputs.logits,
+ do_sample=do_sample,
+ logits_processor=logits_processor,
+ device=input_ids.device,
+ generation_mode=generation_mode,
+ torch_generator=torch_generator,
+ )
+
+ if streamer is not None:
+ streamer.put(next_tokens.cpu())
+
+ if next_audio_tokens is not None:
+ # If the token is audio bos token, we will generate the audio placeholder token
+ # and the corresponding audio stream bos token to start the audio generation.
+ audio_sequences.append(next_audio_tokens[:, None])
+ if streamer is not None:
+ streamer.put(next_audio_tokens.cpu())
+ if model_kwargs["audio_out_ids"] is None or model_kwargs["audio_out_ids"].shape[0] == 0:
+ # Initialize audio_out_ids
+ model_kwargs["audio_out_ids"] = next_audio_tokens[:, None]
+ model_kwargs["audio_out_ids_start"] = torch.tensor(
+ [0], dtype=torch.long, device=input_ids.device
+ )
+ else:
+ model_kwargs["audio_out_ids_start"] = torch.concat(
+ [
+ model_kwargs["audio_out_ids_start"],
+ torch.tensor(
+ [model_kwargs["audio_out_ids"].shape[1]], dtype=torch.long, device=input_ids.device
+ ),
+ ],
+ dim=0,
+ )
+ model_kwargs["audio_out_ids"] = torch.concat(
+ [model_kwargs["audio_out_ids"], next_audio_tokens[:, None]], dim=1
+ )
+
+ if return_dict_in_generate:
+ if output_scores:
+ if is_audio_generation_mode:
+ scores += (next_audio_token_scores,)
+ else:
+ scores += (next_token_scores,)
+ if output_logits:
+ if is_audio_generation_mode:
+ raw_logits += (next_audio_token_logits,)
+ else:
+ raw_logits += (next_token_logits,)
+ if output_attentions:
+ decoder_attentions += (outputs.attentions,)
+ if output_hidden_states:
+ decoder_hidden_states += (outputs.hidden_states,)
+
+ # finished sentences should have their next token be a padding token
+ if has_eos_stopping_criteria:
+ next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
+
+ if "tokenizer_length" in generation_config.generation_kwargs:
+ tokenizer_length = generation_config.generation_kwargs["tokenizer_length"]
+ if torch.max(next_tokens) >= tokenizer_length:
+ raise ValueError(
+ f"Next generated token has max value {torch.max(next_tokens)} which is greater than the tokenizer's vocabulary size {tokenizer_length}, this is undesired behavior."
+ )
+
+ # update generated ids, model inputs, and length for next step
+ if not is_audio_generation_mode or next_tokens[0] != self.audio_out_token_idx:
+ # We only add one <|AUDIO_OUT|> token to the input_ids for simplicity.
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
+ input_ids_full = torch.cat([input_ids_full, next_tokens[:, None]], dim=-1)
+ unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids_full, scores)
+ this_peer_finished = unfinished_sequences.max() == 0
+ cur_len += 1
+
+ # This is needed to properly delete outputs.logits which may be very large for first iteration
+ # Otherwise a reference to outputs is kept which keeps the logits alive in the next iteration
+ del outputs
+
+ if streamer is not None:
+ streamer.end()
+
+ if return_dict_in_generate:
+ return HiggsAudioGenerationOutput(
+ sequences=input_ids,
+ audio_sequences=audio_sequences,
+ scores=scores,
+ logits=raw_logits,
+ attentions=decoder_attentions,
+ hidden_states=decoder_hidden_states,
+ past_key_values=model_kwargs.get("past_key_values"),
+ )
+ else:
+ return input_ids, audio_sequences
+
+ @torch.inference_mode()
+ def generate(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ audio_features: Optional[torch.FloatTensor] = None,
+ audio_feature_attention_mask: Optional[torch.BoolTensor] = None,
+ audio_in_ids: Optional[torch.LongTensor] = None,
+ audio_in_ids_start: Optional[torch.LongTensor] = None,
+ audio_out_ids: Optional[torch.LongTensor] = None,
+ audio_out_ids_start: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
+ audio_out_bos_token_id: Optional[int] = None,
+ audio_eos_token_id: Optional[int] = None,
+ past_key_values_buckets: Optional[OrderedDict[int, Cache]] = None,
+ seed: Optional[int] = None,
+ **kwargs,
+ ):
+ """
+ Generate text and, when triggered, audio tokens.
+
+ Following the standard HuggingFace generation flow, every sampling step prepares the model
+ inputs, runs a forward pass, samples the next text or audio tokens from the logits, updates
+ the model kwargs (KV cache, attention mask, audio masks), and checks the stopping criteria.
+ See `_sample()` above for the full loop.
+
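+ A minimal usage sketch (assuming `input_ids` and the Whisper-style audio tensors have
+ already been prepared, e.g. by a collator):
+
+ out = model.generate(
+ input_ids=input_ids,
+ audio_features=audio_features,
+ audio_feature_attention_mask=audio_feature_attention_mask,
+ audio_out_bos_token_id=audio_out_bos_token_id,
+ audio_eos_token_id=audio_eos_token_id,
+ max_new_tokens=1024,
+ do_sample=True,
+ return_dict_in_generate=True,
+ )
+ text_ids, audio_codes = out.sequences, out.audio_sequences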
+ """
+ # Right now this is a very simplified version of generate; we should revisit it once the model architecture stabilizes.
+ assert input_ids.shape[0] == 1, "Currently HiggsAudioModel.generate() only supports batch_size=1."
+ generation_config, kwargs = self._prepare_generation_config(kwargs.pop("generation_config", None), **kwargs)
+ if audio_out_bos_token_id is not None:
+ generation_config.generation_kwargs["audio_out_bos_token_id"] = audio_out_bos_token_id
+ else:
+ generation_config.generation_kwargs["audio_out_bos_token_id"] = getattr(self, "audio_out_bos_token_id", None)
+
+ if audio_eos_token_id is not None:
+ generation_config.generation_kwargs["audio_eos_token_id"] = audio_eos_token_id
+ else:
+ generation_config.generation_kwargs["audio_eos_token_id"] = getattr(self, "audio_eos_token_id", None)
+
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
+ has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
+
+ generation_config.generation_kwargs["ras_win_len"] = kwargs.pop("ras_win_len", None)
+ generation_config.generation_kwargs["ras_win_max_num_repeat"] = kwargs.pop("ras_win_max_num_repeat", 2)
+ # Set the generation seed if deterministic generation is required
+ if seed is not None:
+ generation_config.generation_kwargs["seed"] = seed
+
+ # Store tokenizer in generation config if it is in kwargs without popping it
+ if "tokenizer" in kwargs:
+ generation_config.generation_kwargs["tokenizer_length"] = len(kwargs["tokenizer"])
+
+ # input_ids: [bsz, seq_len]
+ # The merging of audio features happens inside the forward path. The input_ids does not need to change.
+ # TODO: prepare the final input embeddings to improve generation performance
+ input_ids_length = input_ids.shape[-1]
+ generation_config = self._prepare_generated_length(
+ generation_config=generation_config,
+ has_default_max_length=has_default_max_length,
+ has_default_min_length=has_default_min_length,
+ model_input_name=None,
+ inputs_tensor=None,
+ input_ids_length=input_ids_length,
+ )
+ assert generation_config.num_beams == 1, "Currently, only num_beams=1 is supported."
+ return_dict_in_generate = generation_config.return_dict_in_generate
+ output_scores = generation_config.output_scores
+
+ # When attn_implementation is sdpa or flash-attention, the causal mask is created automatically.
+ attention_mask = kwargs.pop("attention_mask", None)
+ return super().generate(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ audio_features=audio_features,
+ audio_feature_attention_mask=audio_feature_attention_mask,
+ audio_in_ids=audio_in_ids,
+ audio_in_ids_start=audio_in_ids_start,
+ audio_out_ids=audio_out_ids,
+ audio_out_ids_start=audio_out_ids_start,
+ past_key_values=past_key_values,
+ generation_config=generation_config,
+ output_scores=output_scores,
+ return_dict_in_generate=return_dict_in_generate,
+ past_key_values_buckets=past_key_values_buckets,
+ **kwargs,
+ )
+
+ def parameter_count_per_component(self):
+ """Count the number of parameters per component in the model.
+
+ HiggsAudio has the following main components:
+ audio_tower: Maps audio features to hidden states
+ llm_embed: The embedding layer of the LLM
+ llm_non_embed: The non-embedding layers of the LLM
+ audio_embed: The audio codebook embeddings
+ audio_adapter: The additional layers used for audio generation
+
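+ Returns a dict of the form {"trainable": {...}, "total": {...}}, each mapping the component
+ names above to parameter counts.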
+ """
+ trainable_stats = {
+ "audio_tower": 0,
+ "llm_embed": 0,
+ "llm_non_embed": 0,
+ "audio_embed": 0,
+ "audio_adapter": 0,
+ "overall": 0,
+ }
+ total_stats = {
+ "audio_tower": 0,
+ "llm_embed": 0,
+ "llm_non_embed": 0,
+ "audio_embed": 0,
+ "audio_adapter": 0,
+ "overall": 0,
+ }
+
+ total_stats["overall"] = count_parameters(self, trainable_only=False)
+ trainable_stats["overall"] = count_parameters(self, trainable_only=True)
+
+ for mod in [self.audio_tower]:
+ if mod is not None:
+ total_stats["audio_tower"] += count_parameters(mod, trainable_only=False)
+ trainable_stats["audio_tower"] += count_parameters(mod, trainable_only=True)
+
+ total_stats["llm_embed"] = count_parameters(self.embed_tokens, trainable_only=False)
+ trainable_stats["llm_embed"] = count_parameters(self.embed_tokens, trainable_only=True)
+
+ total_stats["audio_embed"] = count_parameters(self.audio_codebook_embeddings, trainable_only=False)
+ trainable_stats["audio_embed"] = count_parameters(self.audio_codebook_embeddings, trainable_only=True)
+
+ # Calculate number of parameters for LLM
+ for layer in self.layers:
+ if isinstance(layer, HiggsAudioDualFFNDecoderLayer):
+ total_param_count = count_parameters(layer, trainable_only=False)
+ total_trainable_param_count = count_parameters(layer, trainable_only=True)
+ total_stats["llm_non_embed"] += total_param_count
+ trainable_stats["llm_non_embed"] += total_trainable_param_count
+ if not layer.fast_forward:
+ audio_mlp_param_count = count_parameters(layer.audio_mlp, trainable_only=False)
+ audio_mlp_trainable_param_count = count_parameters(layer.audio_mlp, trainable_only=True)
+
+ audio_norm_param_count = count_parameters(
+ layer.audio_post_attention_layernorm, trainable_only=False
+ ) + count_parameters(layer.audio_input_layernorm, trainable_only=False)
+ audio_norm_trainable_param_count = count_parameters(
+ layer.audio_post_attention_layernorm, trainable_only=True
+ ) + count_parameters(layer.audio_input_layernorm, trainable_only=True)
+ total_stats["llm_non_embed"] -= audio_mlp_param_count + audio_norm_param_count
+ trainable_stats["llm_non_embed"] -= (
+ audio_mlp_trainable_param_count + audio_norm_trainable_param_count
+ )
+ total_stats["audio_adapter"] += audio_mlp_param_count + audio_norm_param_count
+ trainable_stats["audio_adapter"] += (
+ audio_mlp_trainable_param_count + audio_norm_trainable_param_count
+ )
+
+ if layer.use_audio_attention:
+ audio_attn_param_count = count_parameters(
+ layer.audio_attn, trainable_only=False
+ ) + count_parameters(layer.audio_post_audio_attn_layer_norm, trainable_only=False)
+ audio_attn_trainable_param_count = count_parameters(
+ layer.audio_attn, trainable_only=True
+ ) + count_parameters(layer.audio_post_audio_attn_layer_norm, trainable_only=True)
+ total_stats["llm_non_embed"] -= audio_attn_param_count
+ trainable_stats["llm_non_embed"] -= audio_attn_trainable_param_count
+ total_stats["audio_adapter"] += audio_attn_param_count
+ trainable_stats["audio_adapter"] += audio_attn_trainable_param_count
+ else:
+ total_stats["llm_non_embed"] += count_parameters(layer, trainable_only=False)
+ trainable_stats["llm_non_embed"] += count_parameters(layer, trainable_only=True)
+ total_stats["llm_non_embed"] += count_parameters(self.norm, trainable_only=False)
+ trainable_stats["llm_non_embed"] += count_parameters(self.norm, trainable_only=True)
+
+ total_stats["audio_adapter"] += count_parameters(self.audio_decoder_proj.audio_lm_head, trainable_only=False)
+ trainable_stats["audio_adapter"] += count_parameters(
+ self.audio_decoder_proj.audio_lm_head, trainable_only=True
+ )
+ total_stats["llm_embed"] += count_parameters(self.audio_decoder_proj.text_lm_head, trainable_only=False)
+ trainable_stats["llm_embed"] += count_parameters(self.audio_decoder_proj.text_lm_head, trainable_only=True)
+
+ other_audio_modules = [self.audio_encoder_proj]
+ if self.use_audio_out_embed_projector:
+ other_audio_modules.append(self.audio_out_embed_projector)
+
+ for mod in other_audio_modules:
+ if mod is not None:
+ total_stats["audio_adapter"] += count_parameters(mod, trainable_only=False)
+ trainable_stats["audio_adapter"] += count_parameters(mod, trainable_only=True)
+ return {"trainable": trainable_stats, "total": total_stats}
+
+ def set_skip_audio_tower(self):
+ self.config.skip_audio_tower = True
+ self.config.encode_whisper_embed = False
+
+ def set_encode_audio_in_tokens(self):
+ self.config.encode_audio_in_tokens = True
+
+ def freeze_audio_tower(self):
+ if self.audio_tower is not None:
+ for param in self.audio_tower.parameters():
+ param.requires_grad = False
+
+ def freeze_audio_encoder_proj(self):
+ if self.audio_encoder_proj is not None:
+ for param in self.audio_encoder_proj.parameters():
+ param.requires_grad = False
+
+ def freeze_llm(self, freeze_embed=True, freeze_embed_until_idx: Optional[int] = None):
+ for layer in self.layers:
+ if isinstance(layer, HiggsAudioDualFFNDecoderLayer):
+ for param in layer.self_attn.parameters():
+ param.requires_grad = False
+ for param in layer.mlp.parameters():
+ param.requires_grad = False
+
+ for param in layer.post_attention_layernorm.parameters():
+ param.requires_grad = False
+
+ for param in layer.input_layernorm.parameters():
+ param.requires_grad = False
+ else:
+ for param in layer.parameters():
+ param.requires_grad = False
+
+ for param in self.norm.parameters():
+ param.requires_grad = False
+
+ if freeze_embed:
+ if freeze_embed_until_idx is None:
+ for param in self.embed_tokens.parameters():
+ param.requires_grad = False
+ else:
+ assert isinstance(self.embed_tokens, nn.Embedding)
+ self.embed_tokens = PartiallyFrozenEmbedding(
+ original_embedding=self.embed_tokens, freeze_until_idx=freeze_embed_until_idx
+ )
+
+ def freeze_text_head(self, freeze_text_head_until_idx: Optional[int] = None):
+ """Freeze the final text head"""
+ if freeze_text_head_until_idx is None:
+ for param in self.audio_decoder_proj.text_lm_head.parameters():
+ param.requires_grad = False
+
+ else:
+ assert isinstance(self.audio_decoder_proj.text_lm_head, nn.Linear)
+ self.audio_decoder_proj.text_lm_head = PartiallyFrozenLinear(
+ original_linear=self.audio_decoder_proj.text_lm_head, freeze_until_idx=freeze_text_head_until_idx
+ )
+
+ @classmethod
+ def merge_weights_from_checkpoint(cls, checkpoint_dir: str, merged_output_dir: str, *model_args, **kwargs):
+ # For users' convenience, we merge the embedding and text_lm_head back together if they were split
+ splitted_model = super().from_pretrained(
+ checkpoint_dir,
+ *model_args,
+ torch_dtype=torch.bfloat16,
+ device_map="cpu",
+ **{**kwargs, "state_dict": None}, # Prevent auto-loading state_dict
+ )
+
+ # Load all safetensor shards
+ state_dict = {}
+ shard_paths = sorted(glob.glob(os.path.join(checkpoint_dir, "*.safetensors")))
+
+ for shard_path in shard_paths:
+ shard_dict = load_file(shard_path) # Load each shard
+ state_dict.update(shard_dict) # Merge into a single dict
+
+ # Merge weights
+ if (
+ "audio_decoder_proj.text_lm_head.linear_frozen.weight" in state_dict
+ and "audio_decoder_proj.text_lm_head.linear_trainable.weight" in state_dict
+ ):
+ state_dict["audio_decoder_proj.text_lm_head.weight"] = torch.cat(
+ [
+ state_dict["audio_decoder_proj.text_lm_head.linear_frozen.weight"],
+ state_dict["audio_decoder_proj.text_lm_head.linear_trainable.weight"],
+ ],
+ dim=0,
+ )
+
+ del state_dict["audio_decoder_proj.text_lm_head.linear_frozen.weight"]
+ del state_dict["audio_decoder_proj.text_lm_head.linear_trainable.weight"]
+
+ if (
+ "embed_tokens.embedding_frozen.weight" in state_dict
+ and "embed_tokens.embedding_trainable.weight" in state_dict
+ ):
+ state_dict["embed_tokens.weight"] = torch.cat(
+ [
+ state_dict["embed_tokens.embedding_frozen.weight"],
+ state_dict["embed_tokens.embedding_trainable.weight"],
+ ],
+ dim=0,
+ )
+
+ del state_dict["embed_tokens.embedding_frozen.weight"]
+ del state_dict["embed_tokens.embedding_trainable.weight"]
+
+ # Load the final state_dict
+ splitted_model.load_state_dict(state_dict, strict=True)
+
+ if merged_output_dir:
+ splitted_model.save_pretrained(merged_output_dir, is_main_process=True, state_dict=state_dict)
+
+ @torch.inference_mode()
+ def capture_model(self, past_key_values: list[Union[Cache, List[torch.FloatTensor]]]) -> None:
+ """Capture CUDA graphs for the model's forward pass with different KV cache lengths.
+
+ Args:
+ past_key_values: List of KV caches to capture graphs for
+ """
+ for past_key_value in past_key_values:
+ kv_cache_length = past_key_value.get_max_cache_shape()
+ # We capture two graphs, one for decoding audio tokens and one for decoding text tokens
+ for is_decoding_audio_token in [True, False]:
+ runner = CUDAGraphRunner(self._forward_core)
+
+ # Create dummy inputs for graph capture
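+ # CUDA graphs require fixed tensor shapes, so we capture with a single decode token (sequence
+ # length 1) and attention masks sized to the full KV cache length.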
+ batch_size = 1
+ hidden_dim = self.config.text_config.hidden_size
+
+ hidden_states = torch.zeros(
+ (batch_size, 1, hidden_dim), dtype=self.dtype, device=self.device
+ )
+ causal_mask = torch.ones(
+ (batch_size, 1, 1, kv_cache_length), dtype=self.dtype, device=self.device
+ )
+ position_ids = torch.zeros((batch_size, 1), dtype=torch.long, device=self.device)
+ audio_discrete_codes_mask = torch.tensor(
+ [[is_decoding_audio_token]], dtype=torch.bool, device=self.device
+ )
+ cache_position = torch.tensor([kv_cache_length - 1], dtype=torch.long, device=self.device)
+ audio_attention_mask = torch.ones_like(causal_mask)
+ fast_forward_attention_mask = torch.ones_like(causal_mask)
+
+ runner.capture(
+ hidden_states=hidden_states,
+ causal_mask=causal_mask,
+ position_ids=position_ids,
+ audio_discrete_codes_mask=audio_discrete_codes_mask,
+ cache_position=cache_position,
+ past_key_values=past_key_value,
+ use_cache=True,
+ audio_attention_mask=audio_attention_mask,
+ fast_forward_attention_mask=fast_forward_attention_mask,
+ output_attentions=False,
+ output_hidden_states=False,
+ is_decoding_audio_token=is_decoding_audio_token,
+ is_using_cuda_graph=True,
+ )
+
+ self.decode_graph_runners[kv_cache_length][is_decoding_audio_token] = runner
\ No newline at end of file
diff --git a/comfy/ldm/higgsv2/tokenizer.py b/comfy/ldm/higgsv2/tokenizer.py
new file mode 100644
index 000000000..c4277291c
--- /dev/null
+++ b/comfy/ldm/higgsv2/tokenizer.py
@@ -0,0 +1,872 @@
+import os
+import math
+import torch
+import torch.nn as nn
+from typing import Optional
+import torch.nn.functional as F
+from torch.nn.utils.parametrizations import weight_norm
+import torchaudio
+import numpy as np
+from torch import vmap
+from transformers import AutoModel
+
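+# `operations` is expected to provide Conv1d/ConvTranspose1d implementations (e.g. comfy.ops), which
+# these helpers wrap with weight normalization.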
+def WNConv1d(*args, device = None, dtype = None, operations = None, **kwargs):
+ return weight_norm(operations.Conv1d(*args, **kwargs, device = device, dtype = dtype))
+
+
+def WNConvTranspose1d(*args, device = None, dtype = None, operations = None, **kwargs):
+ return weight_norm(operations.ConvTranspose1d(*args, **kwargs, device = device, dtype = dtype))
+
+
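+# Snake activation: x + (1/alpha) * sin^2(alpha * x); a small epsilon on alpha guards against division by zero.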
+@torch.jit.script
+def snake(x, alpha):
+ shape = x.shape
+ x = x.reshape(shape[0], shape[1], -1)
+ x = x + (alpha + 1e-9).reciprocal() * torch.sin(alpha * x).pow(2)
+ x = x.reshape(shape)
+ return x
+
+
+class Snake1d(nn.Module):
+ def __init__(self, channels, device = None, dtype = None):
+ super().__init__()
+ self.alpha = nn.Parameter(torch.ones(1, channels, 1, device = device, dtype = dtype))
+
+ def forward(self, x):
+ return snake(x, self.alpha)
+
+class DACResidualUnit(nn.Module):
+ def __init__(self, dim: int = 16, dilation: int = 1, device = None, dtype = None, operations = None):
+ super().__init__()
+ pad = ((7 - 1) * dilation) // 2
+ self.block = nn.Sequential(
+ Snake1d(dim, device = device, dtype = dtype),
+ WNConv1d(dim, dim, kernel_size=7, dilation=dilation, padding=pad, device = device, dtype = dtype, operations = operations),
+ Snake1d(dim, device = device, dtype = dtype),
+ WNConv1d(dim, dim, kernel_size=1, device = device, dtype = dtype, operations = operations),
+ )
+
+ def forward(self, x):
+ y = self.block(x)
+ pad = (x.shape[-1] - y.shape[-1]) // 2
+ if pad > 0:
+ x = x[..., pad:-pad]
+ return x + y
+
+
+class DACEncoderBlock(nn.Module):
+ def __init__(self, dim: int = 16, stride: int = 1, device = None, dtype = None, operations = None):
+ super().__init__()
+ self.block = nn.Sequential(
+ DACResidualUnit(dim // 2, dilation=1, device = device, dtype = dtype, operations = operations),
+ DACResidualUnit(dim // 2, dilation=3, device = device, dtype = dtype, operations = operations),
+ DACResidualUnit(dim // 2, dilation=9, device = device, dtype = dtype, operations = operations),
+            Snake1d(dim // 2, device = device, dtype = dtype),
+ WNConv1d(
+ dim // 2,
+ dim,
+ kernel_size=2 * stride,
+ stride=stride,
+ padding=math.ceil(stride / 2),
+ device = device, dtype = dtype, operations = operations
+ ),
+ )
+
+ def forward(self, x):
+ return self.block(x)
+
+
+class DACEncoder(nn.Module):
+ def __init__(
+ self,
+ d_model: int = 64,
+ strides: list = [2, 4, 8, 8],
+ d_latent: int = 256,
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+ # Create first convolution
+ self.block = [WNConv1d(1, d_model, kernel_size=7, padding=3, device = device, dtype = dtype, operations = operations)]
+
+ # Create EncoderBlocks that double channels as they downsample by `stride`
+ for stride in strides:
+ d_model *= 2
+ self.block += [DACEncoderBlock(d_model, stride=stride, device = device, dtype = dtype, operations = operations)]
+
+ # Create last convolution
+ self.block += [
+            Snake1d(d_model, device = device, dtype = dtype),
+ WNConv1d(d_model, d_latent, kernel_size=3, padding=1, device = device, dtype = dtype, operations = operations),
+ ]
+
+        # Wrap block into nn.Sequential
+ self.block = nn.Sequential(*self.block)
+ self.enc_dim = d_model
+
+ def forward(self, x):
+ return self.block(x)
+
+
+class DACDecoderBlock(nn.Module):
+ def __init__(self, input_dim: int = 16, output_dim: int = 8, stride: int = 1, device = None, dtype = None, operations = None):
+ super().__init__()
+ self.block = nn.Sequential(
+ Snake1d(input_dim, device = device, dtype = dtype),
+ WNConvTranspose1d(
+ input_dim,
+ output_dim,
+ kernel_size=2 * stride,
+ stride=stride,
+ padding=math.ceil(stride / 2),
+ output_padding=stride % 2, # out_pad,
+ device = device, dtype = dtype, operations = operations
+ ),
+ DACResidualUnit(output_dim, dilation=1, device = device, dtype = dtype, operations = operations),
+ DACResidualUnit(output_dim, dilation=3, device = device, dtype = dtype, operations = operations),
+ DACResidualUnit(output_dim, dilation=9, device = device, dtype = dtype, operations = operations),
+ )
+
+ def forward(self, x):
+ return self.block(x)
+
+
+class DACDecoder(nn.Module):
+ def __init__(
+ self,
+ input_channel,
+ channels,
+ rates,
+ d_out: int = 1,
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+
+ # Add first conv layer
+ layers = [WNConv1d(input_channel, channels, kernel_size=7, padding=3, device = device, dtype = dtype, operations = operations )]
+
+ # Add upsampling + MRF blocks
+ for i, stride in enumerate(rates):
+ input_dim = channels // 2**i
+ output_dim = channels // 2 ** (i + 1)
+ layers += [DACDecoderBlock(input_dim, output_dim, stride, device = device, dtype = dtype, operations = operations)]
+
+ # Add final conv layer
+ layers += [
+ Snake1d(output_dim, device = device, dtype = dtype),
+ WNConv1d(output_dim, d_out, kernel_size=7, padding=3, device = device, dtype = dtype, operations = operations),
+ ]
+
+ self.model = nn.Sequential(*layers)
+
+ def forward(self, x):
+ return self.model(x)
+
+class Conv1d1x1:
+ def __new__(cls, in_channels, out_channels, bias=True, device=None, dtype=None, operations=None):
+ operations = operations or nn
+ return operations.Conv1d(
+ in_channels, out_channels, kernel_size=1,
+ bias=bias, device=device, dtype=dtype
+ )
+
+class Conv1d(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int,
+ stride: int = 1,
+ padding: int = -1,
+ dilation: int = 1,
+ groups: int = 1,
+ bias: bool = True,
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+
+ if padding < 0:
+ padding = (kernel_size - 1) // 2 * dilation
+
+ self.dilation = dilation
+ self.conv = operations.Conv1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias=bias,
+ device = device, dtype = dtype
+ )
+
+ def forward(self, x):
+ x = self.conv(x)
+ return x
+
+class ConvTranspose1d(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: int,
+ stride: int,
+ padding=-1,
+ output_padding=-1,
+ groups=1,
+ bias=True,
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+ if padding < 0:
+ padding = (stride + 1) // 2
+ if output_padding < 0:
+ output_padding = 1 if stride % 2 else 0
+ self.deconv = operations.ConvTranspose1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=padding,
+ output_padding=output_padding,
+ groups=groups,
+ bias=bias,
+ device = device, dtype = dtype
+ )
+
+ def forward(self, x):
+ x = self.deconv(x)
+ return x
+
+class ResidualUnit(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size=3,
+ dilation=1,
+ bias=False,
+ nonlinear_activation="ELU",
+ nonlinear_activation_params={},
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+ self.activation = getattr(nn, nonlinear_activation)(**nonlinear_activation_params)
+ self.conv1 = Conv1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=1,
+ dilation=dilation,
+ bias=bias,
+ device = device, dtype = dtype, operations = operations
+ )
+ self.conv2 = Conv1d1x1(out_channels, out_channels, bias, device = device, dtype = dtype, operations = operations)
+
+ def forward(self, x):
+ y = self.conv1(self.activation(x))
+ y = self.conv2(self.activation(y))
+ return x + y
+
+
+class EncoderBlock(nn.Module):
+ def __init__(
+ self, in_channels: int, out_channels: int, stride: int, dilations=(1, 1), unit_kernel_size=3, bias=True, device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+ self.res_units = torch.nn.ModuleList()
+ for dilation in dilations:
+ self.res_units += [ResidualUnit(in_channels, in_channels, kernel_size=unit_kernel_size, dilation=dilation, device = device, dtype = dtype, operations = operations)]
+ self.num_res = len(self.res_units)
+
+ kernel_size=3 if stride == 1 else (2 * stride) # special case: stride=1, do not use kernel=2
+ self.conv = Conv1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size = kernel_size,
+ stride=stride,
+ bias=bias,
+ device = device, dtype = dtype, operations = operations
+ )
+
+ def forward(self, x):
+ for idx in range(self.num_res):
+ x = self.res_units[idx](x)
+ x = self.conv(x)
+ return x
+
+
+class Encoder(nn.Module):
+ def __init__(
+ self,
+ input_channels: int,
+ encode_channels: int,
+ channel_ratios=(1, 1),
+ strides=(1, 1),
+ kernel_size=3,
+ bias=True,
+ block_dilations=(1, 1),
+ unit_kernel_size=3,
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+ assert len(channel_ratios) == len(strides)
+ self.conv = Conv1d(
+ in_channels=input_channels, out_channels=encode_channels, kernel_size=kernel_size, stride=1, bias=False,
+ device = device, dtype = dtype, operations = operations
+ )
+ self.conv_blocks = torch.nn.ModuleList()
+ in_channels = encode_channels
+ for idx, stride in enumerate(strides):
+ out_channels = int(encode_channels * channel_ratios[idx]) # could be float
+ self.conv_blocks += [
+ EncoderBlock(
+ in_channels,
+ out_channels,
+ stride,
+ dilations=block_dilations,
+ unit_kernel_size=unit_kernel_size,
+ bias=bias,
+ device = device, dtype = dtype, operations = operations
+ )
+ ]
+ in_channels = out_channels
+ self.num_blocks = len(self.conv_blocks)
+ self.out_channels = out_channels
+
+ def forward(self, x):
+ x = self.conv(x)
+ for i in range(self.num_blocks):
+ x = self.conv_blocks[i](x)
+ return x
+
+
+class DecoderBlock(nn.Module):
+ """Decoder block (no up-sampling)"""
+
+ def __init__(
+ self, in_channels: int, out_channels: int, stride: int, dilations=(1, 1), unit_kernel_size=3, bias=True, device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+
+ if stride == 1:
+ self.conv = Conv1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=3, # fix kernel=3 when stride=1 for unchanged shape
+ stride=stride,
+ bias=bias,
+ device = device, dtype = dtype, operations = operations
+ )
+ else:
+ self.conv = ConvTranspose1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=(2 * stride),
+ stride=stride,
+ bias=bias,
+ device = device, dtype = dtype, operations = operations
+ )
+
+ self.res_units = nn.ModuleList([
+ ResidualUnit(out_channels, out_channels, kernel_size=unit_kernel_size, dilation=d, device = device, dtype = dtype, operations = operations)
+ for d in dilations
+ ])
+
+ self.num_res = len(self.res_units)
+
+ def forward(self, x):
+ x = self.conv(x)
+ for idx in range(self.num_res):
+ x = self.res_units[idx](x)
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self,
+ code_dim: int,
+ output_channels: int,
+ decode_channels: int,
+ channel_ratios=(1, 1),
+ strides=(1, 1),
+ kernel_size=3,
+ bias=True,
+ block_dilations=(1, 1),
+ unit_kernel_size=3,
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+ assert len(channel_ratios) == len(strides)
+ self.conv1 = Conv1d(
+ in_channels=code_dim,
+ out_channels=int(decode_channels * channel_ratios[0]),
+ kernel_size=kernel_size,
+ stride=1,
+ bias=False,
+ device = device, dtype = dtype, operations = operations
+ )
+
+ self.conv_blocks = torch.nn.ModuleList()
+ for idx, stride in enumerate(strides):
+ in_channels = int(decode_channels * channel_ratios[idx])
+ if idx < (len(channel_ratios) - 1):
+ out_channels = int(decode_channels * channel_ratios[idx + 1])
+ else:
+ out_channels = decode_channels
+ self.conv_blocks += [
+ DecoderBlock(
+ in_channels,
+ out_channels,
+ stride,
+ dilations=block_dilations,
+ unit_kernel_size=unit_kernel_size,
+ bias=bias,
+ device = device, dtype = dtype, operations = operations
+ )
+ ]
+ self.num_blocks = len(self.conv_blocks)
+
+ self.conv2 = Conv1d(out_channels, output_channels, kernel_size = 3, bias=False, device = device, dtype = dtype, operations = operations)
+
+ def forward(self, z):
+ x = self.conv1(z)
+ for i in range(self.num_blocks):
+ x = self.conv_blocks[i](x)
+ x = self.conv2(x)
+ return x
+
+class HiggsAudioFeatureExtractor(nn.Module):
+ def __init__(self, sampling_rate=16000):
+ super().__init__()
+ self.sampling_rate = sampling_rate
+
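+    # Thin wrapper: no spectrogram features are computed; the raw waveform is just reshaped to (B, C, T).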
+ def forward(self, audio_signal):
+ audio_signal = audio_signal.unsqueeze(0)
+ if len(audio_signal.shape) < 3:
+ audio_signal = audio_signal.unsqueeze(0)
+ return {"input_values": audio_signal}
+
+def uniform_init(*shape: int, device = None, dtype = None):
+ t = torch.empty(shape, device = device, dtype = dtype)
+ nn.init.kaiming_uniform_(t)
+ return t
+
+class EuclideanCodebook(nn.Module):
+
+ def __init__(
+ self,
+ dim: int,
+ codebook_size: int,
+        kmeans_init: bool = False,
+ kmeans_iters: int = 10,
+ decay: float = 0.99,
+ epsilon: float = 1e-5,
+ threshold_ema_dead_code: int = 2,
+ device = None, dtype = None
+ ):
+ super().__init__()
+ self.decay = decay
+ init_fn = uniform_init
+ embed = init_fn(codebook_size, dim, device = device, dtype = dtype)
+
+ self.codebook_size = codebook_size
+
+ self.kmeans_iters = kmeans_iters
+ self.epsilon = epsilon
+ self.threshold_ema_dead_code = threshold_ema_dead_code
+
+ # Flag variable to indicate whether the codebook is initialized
+ self.register_buffer("inited", torch.Tensor([not kmeans_init]))
+        # Running EMA cluster size/count: N_i^t in eq. (6) in vqvae paper
+ self.register_buffer("cluster_size", torch.zeros(codebook_size))
+ # Codebook
+ self.register_buffer("embed", embed)
+ # EMA codebook: eq. (7) in vqvae paper
+ self.register_buffer("embed_avg", embed.clone())
+
+ def preprocess(self, x):
+ x = x.view(-1, x.shape[-1])
+ return x
+
+ def quantize(self, x):
+ embed = self.embed.t()
+ if x.dtype != embed.dtype:
+ x = x.to(embed.dtype)
+
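+        # Negated squared Euclidean distance: ||x - e||^2 = ||x||^2 - 2 x.e + ||e||^2,
+        # so argmax over the negation selects the nearest codebook entry.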
+ dist = -(x.pow(2).sum(1, keepdim=True) - 2 * x @ embed + embed.pow(2).sum(0, keepdim=True))
+ embed_ind = dist.max(dim=-1).indices
+ return embed_ind
+
+ def postprocess_emb(self, embed_ind, shape):
+ return embed_ind.view(*shape[:-1])
+
+ def dequantize(self, embed_ind):
+ quantize = F.embedding(embed_ind, self.embed)
+ return quantize
+
+ def encode(self, x):
+ shape = x.shape
+ # pre-process
+ x = self.preprocess(x) # [B, T, D] -> [B*T, D]
+ # quantize
+ embed_ind = self.quantize(x)
+ # post-process
+ embed_ind = self.postprocess_emb(embed_ind, shape)
+ return embed_ind
+
+ def decode(self, embed_ind):
+ quantize = self.dequantize(embed_ind)
+ return quantize
+
+ def forward(self, x):
+ orig_shape = x.shape # [B, T, D]
+ flat = x.view(-1, x.shape[-1]) # [B*T, D]
+
+ embed_ind = self.quantize(flat)
+ embed_ind = self.postprocess_emb(embed_ind, orig_shape)
+ # now embed_ind has shape [B, T]
+
+ quantize = self.dequantize(embed_ind)
+ # quantize: [B, T, D]
+
+ return quantize, embed_ind
+
+class VectorQuantization(nn.Module):
+
+ def __init__(
+ self,
+ dim: int,
+ codebook_size: int,
+ codebook_dim: Optional[int] = None,
+ decay: float = 0.99,
+ epsilon: float = 1e-5,
+ kmeans_init: bool = True,
+ kmeans_iters: int = 50,
+ threshold_ema_dead_code: int = 2,
+ commitment_weight: float = 1.0,
+ device = None, dtype = None, operations = None
+ ):
+ super().__init__()
+ _codebook_dim: int = codebook_dim if codebook_dim is not None else dim
+
+ requires_projection = _codebook_dim != dim
+ self.project_in = operations.Linear(dim, _codebook_dim, device = device, dtype = dtype) if requires_projection else nn.Identity()
+ self.project_out = operations.Linear(_codebook_dim, dim, device = device, dtype = dtype) if requires_projection else nn.Identity()
+
+ self.epsilon = epsilon
+ self.commitment_weight = commitment_weight
+
+ self._codebook = EuclideanCodebook(
+ dim=_codebook_dim,
+ codebook_size=codebook_size,
+ kmeans_init=kmeans_init,
+ kmeans_iters=kmeans_iters,
+ decay=decay,
+ epsilon=epsilon,
+ threshold_ema_dead_code=threshold_ema_dead_code,
+ device = device, dtype = dtype
+ )
+ self.codebook_size = codebook_size
+
+ @property
+ def codebook(self):
+ return self._codebook.embed
+
+ def encode(self, x):
+ x = x.permute(0, 2, 1)
+ x = self.project_in(x)
+ embed_in = self._codebook.encode(x)
+ return embed_in
+
+ def decode(self, embed_ind):
+ quantize = self._codebook.decode(embed_ind)
+ quantize = self.project_out(quantize)
+ quantize = quantize.permute(0, 2, 1)
+ return quantize
+
+ def forward(self, x):
+ device = x.device
+ x = x.transpose(1, 2).contiguous() # [b d n] -> [b n d]
+ x = self.project_in(x)
+
+ quantize, embed_ind = self._codebook(x)
+
+ loss = torch.tensor([0.0], device=device, requires_grad=self.training)
+
+ quantize = self.project_out(quantize)
+ quantize = quantize.transpose(1, 2).contiguous() # [b n d] -> [b d n]
+ return quantize, embed_ind, loss
+
+
+class ResidualVectorQuantization(nn.Module):
+ def __init__(self, *, num_quantizers, device = None, dtype = None, operations = None, **kwargs):
+ super().__init__()
+ self.layers = nn.ModuleList([VectorQuantization(device = device, dtype = dtype, operations = operations, **kwargs) for _ in range(num_quantizers)])
+
+ def forward(self, x, n_q: Optional[int] = None):
+ quantized_out = 0.0
+ residual = x
+
+ all_losses = []
+ all_indices = []
+
+ n_q = n_q or len(self.layers)
+
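+        # Residual VQ: each stage quantizes what the previous stages failed to capture, so the
+        # running sum of per-stage outputs is a progressively finer approximation of x.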
+ for layer in self.layers[:n_q]:
+ quantized, indices, loss = layer(residual)
+ residual = residual - quantized
+ quantized_out = quantized_out + quantized
+
+ all_indices.append(indices)
+ all_losses.append(loss)
+
+ out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
+ return quantized_out, out_indices, out_losses
+
+ def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
+ """ Vectorized Implementation of dequantization | 2x faster than original impl """
+
+ biases = torch.stack([layer.project_out.bias for layer in self.layers])
+
+ codebook_device = self.layers[0]._codebook.embed.device
+ q_indices = q_indices.to(codebook_device)
+
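+        # decode_one dequantizes a single codebook; vmap maps it over the stacked codebook dimension.
+        # This assumes project_out is a real Linear on every layer (true for the Higgs config, where
+        # codebook_dim=64 differs from the quantizer dimension).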
+ def decode_one(codebook_weight, proj_weight, embed_id, proj_biases):
+ quantized = F.embedding(embed_id, codebook_weight).transpose(1, 2) # (B, D, T)
+ quantized = F.linear(quantized.transpose(1, 2), proj_weight, proj_biases).transpose(1, 2)
+ return quantized
+
+ codebook_weights = torch.stack([q._codebook.embed for q in self.layers]) # (n_codebooks, vocab_size, D)
+ proj_weights = torch.stack([q.project_out.weight for q in self.layers])
+
+ quantized = vmap(decode_one)(codebook_weights, proj_weights, q_indices, biases)
+
+ return quantized.sum(0)
+
+class ResidualVectorQuantizer(nn.Module):
+
+ def __init__(
+ self,
+ dimension: int = 256,
+        codebook_dim: Optional[int] = None,
+ n_q: int = 8,
+ bins: int = 1024,
+ decay: float = 0.99,
+ kmeans_init: bool = True,
+ kmeans_iters: int = 50,
+ threshold_ema_dead_code: int = 2,
+ device = None,
+ dtype = None,
+ operations = None
+ ):
+ super().__init__()
+ self.n_q = n_q
+ self.dimension = dimension
+ self.codebook_dim = codebook_dim
+ self.bins = bins
+ self.decay = decay
+ self.kmeans_init = kmeans_init
+ self.kmeans_iters = kmeans_iters
+ self.threshold_ema_dead_code = threshold_ema_dead_code
+ self.vq = ResidualVectorQuantization(
+ dim=self.dimension,
+ codebook_dim=self.codebook_dim,
+ codebook_size=self.bins,
+ num_quantizers=self.n_q,
+ decay=self.decay,
+ kmeans_init=self.kmeans_init,
+ kmeans_iters=self.kmeans_iters,
+ threshold_ema_dead_code=self.threshold_ema_dead_code,
+ device = device, dtype = dtype, operations = operations
+ )
+
+ def forward(self, x: torch.Tensor, sample_rate: int, bandwidth: Optional[float] = None): # -> QuantizedResult:
+
+ bw_per_q = self.get_bandwidth_per_quantizer(sample_rate)
+ n_q = self.get_num_quantizers_for_bandwidth(sample_rate, bandwidth)
+ quantized, codes, commit_loss = self.vq(x, n_q=n_q)
+ bw = torch.tensor(n_q * bw_per_q).to(x)
+ return quantized, codes, bw, torch.mean(commit_loss)
+
+ def get_num_quantizers_for_bandwidth(self, sample_rate: int, bandwidth: Optional[float] = None) -> int:
+ """Return n_q based on specified target bandwidth."""
+ bw_per_q = self.get_bandwidth_per_quantizer(sample_rate)
+ n_q = self.n_q
+ if bandwidth and bandwidth > 0.0:
+ n_q = int(max(1, math.floor(bandwidth / bw_per_q)))
+ return n_q
+
+ def get_bandwidth_per_quantizer(self, sample_rate: int):
+ """Return bandwidth per quantizer for a given input sample rate."""
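+        # Each code carries log2(bins) bits; multiplied by the rate passed in (the token frame rate
+        # in practice) and divided by 1000 this gives kbps per quantizer.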
+ return math.log2(self.bins) * sample_rate / 1000
+
+ def decode(self, codes: torch.Tensor) -> torch.Tensor:
+ """Decode the given codes to the quantized representation."""
+ quantized = self.vq.decode(codes)
+ return quantized
+
+class HiggsAudioTokenizer(nn.Module):
+ def __init__(
+ self,
+ D: int = 256,
+ target_bandwidths= [0.5, 1, 1.5, 2, 4],
+        ratios = [8, 5, 4, 2, 3],  # overall downsampling factor 8*5*4*2*3 = 960
+ sample_rate: int = 24000,
+ bins: int = 1024,
+ n_q: int = 8,
+ codebook_dim: int = 64,
+ last_layer_semantic: bool = True,
+ downsample_mode: str = "step_down",
+ vq_scale: int = 1,
+        semantic_sample_rate: Optional[int] = None,
+ device = None,
+ dtype = None,
+ operations = None,
+ **kwargs
+ ):
+ super().__init__()
+ operations = operations or nn
+ self.hop_length = np.prod(ratios)
+
+        self.frame_rate = math.ceil(sample_rate / np.prod(ratios))  # 25 Hz for the default 24 kHz, hop-960 config
+
+ self.target_bandwidths = target_bandwidths
+ self.n_q = n_q
+ self.sample_rate = sample_rate
+ self.encoder = DACEncoder(64, ratios, D, device = device, dtype = dtype, operations = operations)
+
+ self.decoder_2 = DACDecoder(D, 1024, ratios, device = device, dtype = dtype, operations = operations)
+ self.last_layer_semantic = last_layer_semantic
+ self.device = device
+
+ self.semantic_model = AutoModel.from_pretrained("bosonai/hubert_base", trust_remote_code=True)
+ self.semantic_sample_rate = 16000
+ self.semantic_dim = 768
+ self.encoder_semantic_dim = 768
+
+ # Overwrite semantic model sr to ensure semantic_downsample_factor is an integer
+ if semantic_sample_rate is not None:
+ self.semantic_sample_rate = semantic_sample_rate
+
+ self.semantic_model.eval()
+
+        # Freeze the semantic model; its parameters never receive gradients
+ for param in self.semantic_model.parameters():
+ param.requires_grad = False
+
+ self.semantic_downsample_factor = int(self.hop_length / (self.sample_rate / self.semantic_sample_rate) / 320)
+
+ self.quantizer_dim = int((D + self.encoder_semantic_dim) // vq_scale)
+ self.encoder_semantic = Encoder(input_channels=self.semantic_dim, encode_channels=self.encoder_semantic_dim, device = device, dtype = dtype, operations = operations)
+ self.decoder_semantic = Decoder(
+ code_dim=self.encoder_semantic_dim, output_channels=self.semantic_dim, decode_channels=self.semantic_dim, device = device, dtype = dtype, operations = operations
+ )
+
+ self.quantizer = ResidualVectorQuantizer(
+ dimension=self.quantizer_dim, codebook_dim=codebook_dim, n_q=n_q, bins=bins, device = device, dtype = dtype, operations = operations
+ )
+
+ self.fc_prior = operations.Linear(D + self.encoder_semantic_dim, self.quantizer_dim, device = device, dtype = dtype)
+ self.fc_post1 = operations.Linear(self.quantizer_dim, self.encoder_semantic_dim, device = device, dtype = dtype)
+ self.fc_post2 = operations.Linear(self.quantizer_dim, D, device = device, dtype = dtype)
+
+ self.downsample_mode = downsample_mode
+
+ self.audio_tokenizer_feature_extractor = HiggsAudioFeatureExtractor(sampling_rate=self.sample_rate)
+
+ @property
+ def sampling_rate(self):
+ return self.sample_rate
+
+ @torch.no_grad()
+ def get_regress_target(self, x):
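+        # Semantic target: resample to the HuBERT rate, average the hidden states of all layers,
+        # then stride-downsample in time so the semantic frames align with the acoustic encoder output.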
+ x = torchaudio.functional.resample(x, self.sample_rate, self.semantic_sample_rate)
+
+ x = x[:, 0, :]
+ x = F.pad(x, (160, 160))
+ target = self.semantic_model(x, output_hidden_states=True).hidden_states
+ target = torch.stack(target, dim=1)
+
+ target = target.mean(1)
+
+ if self.downsample_mode == "step_down":
+ if self.semantic_downsample_factor > 1:
+ target = target[:, :: self.semantic_downsample_factor, :]
+
+ return target
+
+ def forward(self):
+ pass
+
+ @property
+ def tps(self):
+ return self.frame_rate
+
+ def encode(self, wv, sr):
+
+ if sr != self.sampling_rate:
+            # resampler parameters tuned to closely match librosa's resample output
+ resampler_torch = torchaudio.transforms.Resample(
+ orig_freq=sr,
+ new_freq=self.sampling_rate,
+ resampling_method="sinc_interp_kaiser",
+ lowpass_filter_width = 121,
+ rolloff = 0.9568384289091556,
+ beta = 21.01531462440614
+ ).to(wv.device)
+
+ wv = resampler_torch(wv)
+
+ if self.audio_tokenizer_feature_extractor is not None:
+ inputs = self.audio_tokenizer_feature_extractor(wv)
+ input_values = inputs["input_values"].to(self.device)
+ else:
+ input_values = torch.from_numpy(wv).float().unsqueeze(0)
+ with torch.no_grad():
+ input_values = input_values.to(wv.device)
+ encoder_outputs = self._xcodec_encode(input_values)
+ vq_code = encoder_outputs[0]
+ return vq_code
+
+ def _xcodec_encode(self, x: torch.Tensor, target_bw: Optional[int] = None) -> torch.Tensor:
+ bw = target_bw
+
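+        # Two parallel branches: HuBERT-derived semantic features and the DAC acoustic encoder output
+        # are length-aligned, concatenated along channels, projected by fc_prior and RVQ-quantized.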
+ e_semantic_input = self.get_regress_target(x).detach()
+
+ e_semantic = self.encoder_semantic(e_semantic_input.transpose(1, 2))
+ e_acoustic = self.encoder(x)
+
+ if e_acoustic.shape[2] != e_semantic.shape[2]:
+ pad_size = 160 * self.semantic_downsample_factor
+ e_acoustic = self.encoder(F.pad(x[:, 0, :], (pad_size, pad_size)).unsqueeze(0))
+
+ if e_acoustic.shape[2] != e_semantic.shape[2]:
+ if e_acoustic.shape[2] > e_semantic.shape[2]:
+ e_acoustic = e_acoustic[:, :, : e_semantic.shape[2]]
+ else:
+ e_semantic = e_semantic[:, :, : e_acoustic.shape[2]]
+
+ e = torch.cat([e_acoustic, e_semantic], dim=1)
+
+ e = self.fc_prior(e.transpose(1, 2))
+
+ e = e.transpose(1, 2)
+ _, codes, _, _ = self.quantizer(e, self.frame_rate, bw)
+ codes = codes.permute(1, 0, 2)
+
+ return codes
+
+ def decode(self, vq_code: torch.Tensor) -> torch.Tensor:
+ vq_code = vq_code.to(self.device)
+
+ if vq_code.ndim < 3:
+ vq_code = vq_code.unsqueeze(0)
+
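+        # quantizer.decode expects (num_codebooks, batch, time); only the acoustic branch
+        # (fc_post2 followed by the DAC decoder) is used to reconstruct the waveform.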
+ vq_code = vq_code.permute(1, 0, 2)
+ quantized = self.quantizer.decode(vq_code)
+ quantized = quantized.transpose(1, 2)
+ quantized_acoustic = self.fc_post2(quantized).transpose(1, 2)
+
+ o = self.decoder_2(quantized_acoustic)
+ return o.detach()
diff --git a/comfy/model_base.py b/comfy/model_base.py
index 4392355ea..3da925695 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -42,6 +42,7 @@ import comfy.ldm.hidream.model
import comfy.ldm.chroma.model
import comfy.ldm.ace.model
import comfy.ldm.omnigen.omnigen2
+import comfy.ldm.higgsv2.model
import comfy.model_management
import comfy.patcher_extension
@@ -1277,3 +1278,7 @@ class Omnigen2(BaseModel):
if ref_latents is not None:
out['ref_latents'] = list([1, 16, sum(map(lambda a: math.prod(a.size()), ref_latents)) // 16])
return out
+
+class Higgsv2(BaseModel):
+ def __init__(self, model_config, model_type=ModelType.EPS, device=None, unet_model=comfy.ldm.higgsv2.model.HiggsAudioModel):
+ super().__init__(model_config, model_type, device, unet_model)
\ No newline at end of file
diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index 18232ade3..cdc15849d 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -388,6 +388,87 @@ def detect_unet_config(state_dict, key_prefix, metadata=None):
dit_config["guidance_embed"] = "{}guidance_in.in_layer.weight".format(key_prefix) in state_dict_keys
return dit_config
+ if f"{key_prefix}t_embedder.mlp.2.weight" in state_dict_keys: # Hunyuan 3D 2.1
+
+ dit_config = {}
+ dit_config["image_model"] = "hunyuan3d2_1"
+ dit_config["in_channels"] = state_dict[f"{key_prefix}x_embedder.weight"].shape[1]
+ dit_config["context_dim"] = 1024
+ dit_config["hidden_size"] = state_dict[f"{key_prefix}x_embedder.weight"].shape[0]
+ dit_config["mlp_ratio"] = 4.0
+ dit_config["num_heads"] = 16
+ dit_config["depth"] = count_blocks(state_dict_keys, f"{key_prefix}blocks.{{}}")
+ dit_config["qkv_bias"] = False
+        dit_config["guidance_cond_proj_dim"] = None  # f"{key_prefix}t_embedder.cond_proj.weight" in state_dict_keys
+ return dit_config
+
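+    # Higgs Audio v2: the dual-FFN audio adapter adds audio_post_attention_layernorm weights to the
+    # decoder layers, so probing the last layer (index 27 of 28) identifies the checkpoint.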
+ if "{}layers.27.audio_post_attention_layernorm.weight".format(key_prefix) in state_dict_keys:
+
+ autoregressive_config = {}
+ autoregressive_config["image_model"] = "higgsv2"
+
+ autoregressive_config["audio_adapter_type"] = "dual_ffn_fast_forward"
+ autoregressive_config["audio_bos_token"] = "<|audio_bos|>"
+ autoregressive_config["audio_codebook_size"] = 1024
+ autoregressive_config["audio_num_codebooks"] = 8
+ autoregressive_config["audio_ffn_hidden_size"] = 3072
+ autoregressive_config["audio_ffn_intermediate_size"] = 8192
+ autoregressive_config["audio_in_token"] = "<|AUDIO|>"
+ autoregressive_config["audio_in_token_idx"] = 128015
+ autoregressive_config["audio_out_token"] = "<|AUDIO_OUT|>"
+ autoregressive_config["audio_out_token_idx"] = 128016
+ autoregressive_config["audio_out_bos_token"] = "<|audio_out_bos|>"
+ autoregressive_config["audio_out_bos_token_id"] = 128013
+ autoregressive_config["audio_eos_token"] = "<|audio_eos|>"
+ autoregressive_config["audio_eos_token_id"] = 128012
+ autoregressive_config["audio_stream_bos_id"] = 1024
+ autoregressive_config["audio_stream_eos_id"] = 1025
+ autoregressive_config["encode_audio_in_tokens"] = True
+
+ autoregressive_config["pad_token_id"] = 128001
+ autoregressive_config["padding_idx"] = 128001
+
+ autoregressive_config["hidden_size"] = 3072
+ autoregressive_config["use_delay_pattern"] = True
+
+ autoregressive_config["vocab_size"] = 128256
+ autoregressive_config["num_hidden_layers"] = 28
+ autoregressive_config["num_attention_heads"] = 24
+ autoregressive_config["num_key_value_heads"] = 8
+ autoregressive_config["max_seq_len"] = 131072
+ autoregressive_config["max_position_embeddings"] = 131072
+ autoregressive_config["bos_token_id"] = 128000
+ autoregressive_config["eos_token_id"] = 128001
+ autoregressive_config["use_cache"] = True
+
+ autoregressive_config["text_config"] = {
+ "model_type": "llama",
+ "vocab_size": 128256,
+ "max_position_embeddings": 131072,
+ "num_hidden_layers": 28,
+ "hidden_size": 3072,
+ "num_attention_heads": 24,
+ "num_key_value_heads": 8,
+ "initializer_range": 0.02,
+ "rms_norm_eps": 1e-05,
+ "pad_token_id": None,
+ "bos_token_id": 128000,
+ "eos_token_id": 128001,
+ "num_return_sequences": 1,
+ "head_dim": 128,
+ "mlp_bias": False,
+ "intermediate_size": 8192
+ }
+
+ autoregressive_config["use_kv_buckets"] = True
+
+ autoregressive_config["audio_decoder_proj_num_layers"] = 0
+ autoregressive_config["audio_dual_ffn_layers"] = list(range(28))
+ autoregressive_config["output_vae"] = False
+
+ return autoregressive_config
+
if '{}caption_projection.0.linear.weight'.format(key_prefix) in state_dict_keys: # HiDream
dit_config = {}
dit_config["image_model"] = "hidream"
diff --git a/comfy/sd.py b/comfy/sd.py
index 5b95cf75a..1c6173450 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -1041,6 +1041,7 @@ def load_state_dict_guess_config(sd, output_vae=True, output_clip=True, output_c
manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device, model_config.supported_inference_dtypes)
model_config.set_inference_dtype(unet_dtype, manual_cast_dtype)
+ output_vae = model_config.unet_config.get("output_vae", output_vae)
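+    # Model configs can opt out of VAE loading via their detected unet_config
+    # (e.g. the Higgs Audio v2 detection sets output_vae = False).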
if model_config.clip_vision_prefix is not None:
if output_clipvision:
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index 2669ca01e..f17f0fc8a 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -19,6 +19,7 @@ import comfy.text_encoders.lumina2
import comfy.text_encoders.wan
import comfy.text_encoders.ace
import comfy.text_encoders.omnigen2
+import comfy.text_encoders.higgsv2
from . import supported_models_base
from . import latent_formats
@@ -1216,7 +1217,22 @@ class Omnigen2(supported_models_base.BASE):
hunyuan_detect = comfy.text_encoders.hunyuan_video.llama_detect(state_dict, "{}qwen25_3b.transformer.".format(pref))
return supported_models_base.ClipTarget(comfy.text_encoders.omnigen2.LuminaTokenizer, comfy.text_encoders.omnigen2.te(**hunyuan_detect))
+class Higgsv2(supported_models_base.BASE):
+ unet_config = {
+ "image_model": "higgsv2",
+ }
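+    # The audio tokenizer (DAC/xcodec) weights carry the "dac." prefix and are loaded through
+    # ComfyUI's text-encoder path; clip_target below returns the Higgs tokenizer wrapper accordingly.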
+    memory_usage_factor = 1.0
+    supported_inference_dtypes = [torch.float16, torch.bfloat16, torch.float32]
+    text_encoder_key_prefix = ["dac."]
+
+    def get_model(self, state_dict, prefix="", device=None):
+        out = model_base.Higgsv2(self, device=device)
+        return out
+
+    def clip_target(self, state_dict = {}):
+        return supported_models_base.ClipTarget(comfy.text_encoders.higgsv2.DummyTokenizer, comfy.text_encoders.higgsv2.HiggsTokenizer)
+
-models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, HiDream, Chroma, ACEStep, Omnigen2]
+models = [LotusD, Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideoSkyreelsI2V, HunyuanVideoI2V, HunyuanVideo, CosmosT2V, CosmosI2V, CosmosT2IPredict2, CosmosI2VPredict2, Lumina2, WAN22_T2V, WAN21_T2V, WAN21_I2V, WAN21_FunControl2V, WAN21_Vace, WAN21_Camera, Hunyuan3Dv2mini, Hunyuan3Dv2, Hunyuan3Dv2_1, HiDream, Chroma, ACEStep, Omnigen2, Higgsv2]
models += [SVD_img2vid]
diff --git a/comfy/text_encoders/higgs_text_tokenizer/special_tokens_map.json b/comfy/text_encoders/higgs_text_tokenizer/special_tokens_map.json
new file mode 100644
index 000000000..cfabacc26
--- /dev/null
+++ b/comfy/text_encoders/higgs_text_tokenizer/special_tokens_map.json
@@ -0,0 +1,16 @@
+{
+ "bos_token": {
+ "content": "<|begin_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|end_of_text|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/comfy/text_encoders/higgs_text_tokenizer/tokenizer.json b/comfy/text_encoders/higgs_text_tokenizer/tokenizer.json
new file mode 100644
index 000000000..8b739a251
--- /dev/null
+++ b/comfy/text_encoders/higgs_text_tokenizer/tokenizer.json
@@ -0,0 +1,1251004 @@
+{
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [
+ {
+ "id": 128000,
+ "content": "<|begin_of_text|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128001,
+ "content": "<|end_of_text|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128002,
+ "content": "<|reserved_special_token_0|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128003,
+ "content": "<|reserved_special_token_1|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128004,
+ "content": "<|finetune_right_pad_id|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128005,
+ "content": "<|reserved_special_token_2|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128006,
+ "content": "<|start_header_id|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128007,
+ "content": "<|end_header_id|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128008,
+ "content": "<|eom_id|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128009,
+ "content": "<|eot_id|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128010,
+ "content": "<|python_tag|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128011,
+ "content": "<|audio_bos|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128012,
+ "content": "<|audio_eos|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128013,
+ "content": "<|audio_out_bos|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128014,
+ "content": "<|reserved_special_token_6|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128015,
+ "content": "<|AUDIO|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128016,
+ "content": "<|AUDIO_OUT|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128017,
+ "content": "<|recipient|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128018,
+ "content": "<|scene_desc_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128019,
+ "content": "<|scene_desc_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128020,
+ "content": "<|generation_instruction_start|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128021,
+ "content": "<|generation_instruction_end|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128022,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128023,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128024,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128025,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128026,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128027,
+ "content": "",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128028,
+ "content": "<|reserved_special_token_20|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128029,
+ "content": "<|reserved_special_token_21|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128030,
+ "content": "<|reserved_special_token_22|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128031,
+ "content": "<|reserved_special_token_23|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128032,
+ "content": "<|reserved_special_token_24|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128033,
+ "content": "<|reserved_special_token_25|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128034,
+ "content": "<|reserved_special_token_26|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128035,
+ "content": "<|reserved_special_token_27|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128036,
+ "content": "<|reserved_special_token_28|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128037,
+ "content": "<|reserved_special_token_29|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128038,
+ "content": "<|reserved_special_token_30|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128039,
+ "content": "<|reserved_special_token_31|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128040,
+ "content": "<|reserved_special_token_32|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128041,
+ "content": "<|reserved_special_token_33|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128042,
+ "content": "<|reserved_special_token_34|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128043,
+ "content": "<|reserved_special_token_35|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128044,
+ "content": "<|reserved_special_token_36|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128045,
+ "content": "<|reserved_special_token_37|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128046,
+ "content": "<|reserved_special_token_38|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128047,
+ "content": "<|reserved_special_token_39|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128048,
+ "content": "<|reserved_special_token_40|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128049,
+ "content": "<|reserved_special_token_41|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128050,
+ "content": "<|reserved_special_token_42|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128051,
+ "content": "<|reserved_special_token_43|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128052,
+ "content": "<|reserved_special_token_44|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128053,
+ "content": "<|reserved_special_token_45|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128054,
+ "content": "<|reserved_special_token_46|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128055,
+ "content": "<|reserved_special_token_47|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128056,
+ "content": "<|reserved_special_token_48|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128057,
+ "content": "<|reserved_special_token_49|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128058,
+ "content": "<|reserved_special_token_50|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128059,
+ "content": "<|reserved_special_token_51|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128060,
+ "content": "<|reserved_special_token_52|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128061,
+ "content": "<|reserved_special_token_53|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128062,
+ "content": "<|reserved_special_token_54|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128063,
+ "content": "<|reserved_special_token_55|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128064,
+ "content": "<|reserved_special_token_56|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128065,
+ "content": "<|reserved_special_token_57|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128066,
+ "content": "<|reserved_special_token_58|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128067,
+ "content": "<|reserved_special_token_59|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128068,
+ "content": "<|reserved_special_token_60|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128069,
+ "content": "<|reserved_special_token_61|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128070,
+ "content": "<|reserved_special_token_62|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128071,
+ "content": "<|reserved_special_token_63|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128072,
+ "content": "<|reserved_special_token_64|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128073,
+ "content": "<|reserved_special_token_65|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128074,
+ "content": "<|reserved_special_token_66|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128075,
+ "content": "<|reserved_special_token_67|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128076,
+ "content": "<|reserved_special_token_68|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128077,
+ "content": "<|reserved_special_token_69|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128078,
+ "content": "<|reserved_special_token_70|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128079,
+ "content": "<|reserved_special_token_71|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128080,
+ "content": "<|reserved_special_token_72|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128081,
+ "content": "<|reserved_special_token_73|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128082,
+ "content": "<|reserved_special_token_74|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128083,
+ "content": "<|reserved_special_token_75|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128084,
+ "content": "<|reserved_special_token_76|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128085,
+ "content": "<|reserved_special_token_77|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128086,
+ "content": "<|reserved_special_token_78|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128087,
+ "content": "<|reserved_special_token_79|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128088,
+ "content": "<|reserved_special_token_80|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128089,
+ "content": "<|reserved_special_token_81|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128090,
+ "content": "<|reserved_special_token_82|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128091,
+ "content": "<|reserved_special_token_83|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128092,
+ "content": "<|reserved_special_token_84|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128093,
+ "content": "<|reserved_special_token_85|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128094,
+ "content": "<|reserved_special_token_86|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128095,
+ "content": "<|reserved_special_token_87|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128096,
+ "content": "<|reserved_special_token_88|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128097,
+ "content": "<|reserved_special_token_89|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128098,
+ "content": "<|reserved_special_token_90|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128099,
+ "content": "<|reserved_special_token_91|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128100,
+ "content": "<|reserved_special_token_92|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128101,
+ "content": "<|reserved_special_token_93|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128102,
+ "content": "<|reserved_special_token_94|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128103,
+ "content": "<|reserved_special_token_95|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128104,
+ "content": "<|reserved_special_token_96|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128105,
+ "content": "<|reserved_special_token_97|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128106,
+ "content": "<|reserved_special_token_98|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128107,
+ "content": "<|reserved_special_token_99|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128108,
+ "content": "<|reserved_special_token_100|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128109,
+ "content": "<|reserved_special_token_101|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128110,
+ "content": "<|reserved_special_token_102|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128111,
+ "content": "<|reserved_special_token_103|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128112,
+ "content": "<|reserved_special_token_104|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128113,
+ "content": "<|reserved_special_token_105|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128114,
+ "content": "<|reserved_special_token_106|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128115,
+ "content": "<|reserved_special_token_107|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128116,
+ "content": "<|reserved_special_token_108|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128117,
+ "content": "<|reserved_special_token_109|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128118,
+ "content": "<|reserved_special_token_110|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128119,
+ "content": "<|reserved_special_token_111|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128120,
+ "content": "<|reserved_special_token_112|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128121,
+ "content": "<|reserved_special_token_113|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128122,
+ "content": "<|reserved_special_token_114|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128123,
+ "content": "<|reserved_special_token_115|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128124,
+ "content": "<|reserved_special_token_116|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128125,
+ "content": "<|reserved_special_token_117|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128126,
+ "content": "<|reserved_special_token_118|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128127,
+ "content": "<|reserved_special_token_119|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128128,
+ "content": "<|reserved_special_token_120|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128129,
+ "content": "<|reserved_special_token_121|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128130,
+ "content": "<|reserved_special_token_122|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128131,
+ "content": "<|reserved_special_token_123|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128132,
+ "content": "<|reserved_special_token_124|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128133,
+ "content": "<|reserved_special_token_125|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128134,
+ "content": "<|reserved_special_token_126|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128135,
+ "content": "<|reserved_special_token_127|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128136,
+ "content": "<|reserved_special_token_128|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128137,
+ "content": "<|reserved_special_token_129|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128138,
+ "content": "<|reserved_special_token_130|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128139,
+ "content": "<|reserved_special_token_131|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128140,
+ "content": "<|reserved_special_token_132|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128141,
+ "content": "<|reserved_special_token_133|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128142,
+ "content": "<|reserved_special_token_134|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128143,
+ "content": "<|reserved_special_token_135|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128144,
+ "content": "<|reserved_special_token_136|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128145,
+ "content": "<|reserved_special_token_137|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128146,
+ "content": "<|reserved_special_token_138|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128147,
+ "content": "<|reserved_special_token_139|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128148,
+ "content": "<|reserved_special_token_140|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128149,
+ "content": "<|reserved_special_token_141|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128150,
+ "content": "<|reserved_special_token_142|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128151,
+ "content": "<|reserved_special_token_143|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128152,
+ "content": "<|reserved_special_token_144|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128153,
+ "content": "<|reserved_special_token_145|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128154,
+ "content": "<|reserved_special_token_146|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128155,
+ "content": "<|reserved_special_token_147|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128156,
+ "content": "<|reserved_special_token_148|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128157,
+ "content": "<|reserved_special_token_149|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128158,
+ "content": "<|reserved_special_token_150|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128159,
+ "content": "<|reserved_special_token_151|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128160,
+ "content": "<|reserved_special_token_152|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128161,
+ "content": "<|reserved_special_token_153|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128162,
+ "content": "<|reserved_special_token_154|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128163,
+ "content": "<|reserved_special_token_155|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128164,
+ "content": "<|reserved_special_token_156|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128165,
+ "content": "<|reserved_special_token_157|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128166,
+ "content": "<|reserved_special_token_158|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128167,
+ "content": "<|reserved_special_token_159|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128168,
+ "content": "<|reserved_special_token_160|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128169,
+ "content": "<|reserved_special_token_161|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128170,
+ "content": "<|reserved_special_token_162|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128171,
+ "content": "<|reserved_special_token_163|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128172,
+ "content": "<|reserved_special_token_164|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128173,
+ "content": "<|reserved_special_token_165|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128174,
+ "content": "<|reserved_special_token_166|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128175,
+ "content": "<|reserved_special_token_167|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128176,
+ "content": "<|reserved_special_token_168|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128177,
+ "content": "<|reserved_special_token_169|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128178,
+ "content": "<|reserved_special_token_170|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128179,
+ "content": "<|reserved_special_token_171|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128180,
+ "content": "<|reserved_special_token_172|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128181,
+ "content": "<|reserved_special_token_173|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128182,
+ "content": "<|reserved_special_token_174|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128183,
+ "content": "<|reserved_special_token_175|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128184,
+ "content": "<|reserved_special_token_176|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128185,
+ "content": "<|reserved_special_token_177|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128186,
+ "content": "<|reserved_special_token_178|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128187,
+ "content": "<|reserved_special_token_179|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128188,
+ "content": "<|reserved_special_token_180|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128189,
+ "content": "<|reserved_special_token_181|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128190,
+ "content": "<|reserved_special_token_182|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128191,
+ "content": "<|reserved_special_token_183|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128192,
+ "content": "<|reserved_special_token_184|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128193,
+ "content": "<|reserved_special_token_185|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128194,
+ "content": "<|reserved_special_token_186|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128195,
+ "content": "<|reserved_special_token_187|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128196,
+ "content": "<|reserved_special_token_188|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128197,
+ "content": "<|reserved_special_token_189|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128198,
+ "content": "<|reserved_special_token_190|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128199,
+ "content": "<|reserved_special_token_191|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128200,
+ "content": "<|reserved_special_token_192|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128201,
+ "content": "<|reserved_special_token_193|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128202,
+ "content": "<|reserved_special_token_194|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128203,
+ "content": "<|reserved_special_token_195|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128204,
+ "content": "<|reserved_special_token_196|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128205,
+ "content": "<|reserved_special_token_197|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128206,
+ "content": "<|reserved_special_token_198|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128207,
+ "content": "<|reserved_special_token_199|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128208,
+ "content": "<|reserved_special_token_200|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128209,
+ "content": "<|reserved_special_token_201|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128210,
+ "content": "<|reserved_special_token_202|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128211,
+ "content": "<|reserved_special_token_203|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128212,
+ "content": "<|reserved_special_token_204|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128213,
+ "content": "<|reserved_special_token_205|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128214,
+ "content": "<|reserved_special_token_206|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128215,
+ "content": "<|reserved_special_token_207|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128216,
+ "content": "<|reserved_special_token_208|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128217,
+ "content": "<|reserved_special_token_209|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128218,
+ "content": "<|reserved_special_token_210|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128219,
+ "content": "<|reserved_special_token_211|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128220,
+ "content": "<|reserved_special_token_212|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128221,
+ "content": "<|reserved_special_token_213|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128222,
+ "content": "<|reserved_special_token_214|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128223,
+ "content": "<|reserved_special_token_215|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128224,
+ "content": "<|reserved_special_token_216|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128225,
+ "content": "<|reserved_special_token_217|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128226,
+ "content": "<|reserved_special_token_218|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128227,
+ "content": "<|reserved_special_token_219|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128228,
+ "content": "<|reserved_special_token_220|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128229,
+ "content": "<|reserved_special_token_221|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128230,
+ "content": "<|reserved_special_token_222|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128231,
+ "content": "<|reserved_special_token_223|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128232,
+ "content": "<|reserved_special_token_224|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128233,
+ "content": "<|reserved_special_token_225|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128234,
+ "content": "<|reserved_special_token_226|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128235,
+ "content": "<|reserved_special_token_227|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128236,
+ "content": "<|reserved_special_token_228|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128237,
+ "content": "<|reserved_special_token_229|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128238,
+ "content": "<|reserved_special_token_230|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128239,
+ "content": "<|reserved_special_token_231|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128240,
+ "content": "<|reserved_special_token_232|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128241,
+ "content": "<|reserved_special_token_233|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128242,
+ "content": "<|reserved_special_token_234|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128243,
+ "content": "<|reserved_special_token_235|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128244,
+ "content": "<|reserved_special_token_236|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128245,
+ "content": "<|reserved_special_token_237|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128246,
+ "content": "<|reserved_special_token_238|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128247,
+ "content": "<|reserved_special_token_239|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128248,
+ "content": "<|reserved_special_token_240|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128249,
+ "content": "<|reserved_special_token_241|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128250,
+ "content": "<|reserved_special_token_242|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128251,
+ "content": "<|reserved_special_token_243|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128252,
+ "content": "<|reserved_special_token_244|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128253,
+ "content": "<|reserved_special_token_245|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128254,
+ "content": "<|reserved_special_token_246|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ },
+ {
+ "id": 128255,
+ "content": "<|reserved_special_token_247|>",
+ "single_word": false,
+ "lstrip": false,
+ "rstrip": false,
+ "normalized": false,
+ "special": true
+ }
+ ],
+ "normalizer": null,
+ "pre_tokenizer": {
+ "type": "Sequence",
+ "pretokenizers": [
+ {
+ "type": "Split",
+ "pattern": {
+ "Regex": "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
+ },
+ "behavior": "Isolated",
+ "invert": false
+ },
+ {
+ "type": "ByteLevel",
+ "add_prefix_space": false,
+ "trim_offsets": true,
+ "use_regex": false
+ }
+ ]
+ },
+ "post_processor": {
+ "type": "Sequence",
+ "processors": [
+ {
+ "type": "ByteLevel",
+ "add_prefix_space": true,
+ "trim_offsets": false,
+ "use_regex": true
+ },
+ {
+ "type": "TemplateProcessing",
+ "single": [
+ {
+ "SpecialToken": {
+ "id": "<|begin_of_text|>",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ }
+ ],
+ "pair": [
+ {
+ "SpecialToken": {
+ "id": "<|begin_of_text|>",
+ "type_id": 0
+ }
+ },
+ {
+ "Sequence": {
+ "id": "A",
+ "type_id": 0
+ }
+ },
+ {
+ "SpecialToken": {
+ "id": "<|begin_of_text|>",
+ "type_id": 1
+ }
+ },
+ {
+ "Sequence": {
+ "id": "B",
+ "type_id": 1
+ }
+ }
+ ],
+ "special_tokens": {
+ "<|begin_of_text|>": {
+ "id": "<|begin_of_text|>",
+ "ids": [
+ 128000
+ ],
+ "tokens": [
+ "<|begin_of_text|>"
+ ]
+ }
+ }
+ }
+ ]
+ },
+ "decoder": {
+ "type": "ByteLevel",
+ "add_prefix_space": true,
+ "trim_offsets": true,
+ "use_regex": true
+ },
+ "model": {
+ "type": "BPE",
+ "dropout": null,
+ "unk_token": null,
+ "continuing_subword_prefix": null,
+ "end_of_word_suffix": null,
+ "fuse_unk": false,
+ "byte_fallback": false,
+ "ignore_merges": true,
+ "vocab": {
+ "!": 0,
+ "\"": 1,
+ "#": 2,
+ "$": 3,
+ "%": 4,
+ "&": 5,
+ "'": 6,
+ "(": 7,
+ ")": 8,
+ "*": 9,
+ "+": 10,
+ ",": 11,
+ "-": 12,
+ ".": 13,
+ "/": 14,
+ "0": 15,
+ "1": 16,
+ "2": 17,
+ "3": 18,
+ "4": 19,
+ "5": 20,
+ "6": 21,
+ "7": 22,
+ "8": 23,
+ "9": 24,
+ ":": 25,
+ ";": 26,
+ "<": 27,
+ "=": 28,
+ ">": 29,
+ "?": 30,
+ "@": 31,
+ "A": 32,
+ "B": 33,
+ "C": 34,
+ "D": 35,
+ "E": 36,
+ "F": 37,
+ "G": 38,
+ "H": 39,
+ "I": 40,
+ "J": 41,
+ "K": 42,
+ "L": 43,
+ "M": 44,
+ "N": 45,
+ "O": 46,
+ "P": 47,
+ "Q": 48,
+ "R": 49,
+ "S": 50,
+ "T": 51,
+ "U": 52,
+ "V": 53,
+ "W": 54,
+ "X": 55,
+ "Y": 56,
+ "Z": 57,
+ "[": 58,
+ "\\": 59,
+ "]": 60,
+ "^": 61,
+ "_": 62,
+ "`": 63,
+ "a": 64,
+ "b": 65,
+ "c": 66,
+ "d": 67,
+ "e": 68,
+ "f": 69,
+ "g": 70,
+ "h": 71,
+ "i": 72,
+ "j": 73,
+ "k": 74,
+ "l": 75,
+ "m": 76,
+ "n": 77,
+ "o": 78,
+ "p": 79,
+ "q": 80,
+ "r": 81,
+ "s": 82,
+ "t": 83,
+ "u": 84,
+ "v": 85,
+ "w": 86,
+ "x": 87,
+ "y": 88,
+ "z": 89,
+ "{": 90,
+ "|": 91,
+ "}": 92,
+ "~": 93,
+ "¡": 94,
+ "¢": 95,
+ "£": 96,
+ "¤": 97,
+ "¥": 98,
+ "¦": 99,
+ "§": 100,
+ "¨": 101,
+ "©": 102,
+ "ª": 103,
+ "«": 104,
+ "¬": 105,
+ "®": 106,
+ "¯": 107,
+ "°": 108,
+ "±": 109,
+ "²": 110,
+ "³": 111,
+ "´": 112,
+ "µ": 113,
+ "¶": 114,
+ "·": 115,
+ "¸": 116,
+ "¹": 117,
+ "º": 118,
+ "»": 119,
+ "¼": 120,
+ "½": 121,
+ "¾": 122,
+ "¿": 123,
+ "À": 124,
+ "Á": 125,
+ "Â": 126,
+ "Ã": 127,
+ "Ä": 128,
+ "Å": 129,
+ "Æ": 130,
+ "Ç": 131,
+ "È": 132,
+ "É": 133,
+ "Ê": 134,
+ "Ë": 135,
+ "Ì": 136,
+ "Í": 137,
+ "Î": 138,
+ "Ï": 139,
+ "Ð": 140,
+ "Ñ": 141,
+ "Ò": 142,
+ "Ó": 143,
+ "Ô": 144,
+ "Õ": 145,
+ "Ö": 146,
+ "×": 147,
+ "Ø": 148,
+ "Ù": 149,
+ "Ú": 150,
+ "Û": 151,
+ "Ü": 152,
+ "Ý": 153,
+ "Þ": 154,
+ "ß": 155,
+ "à": 156,
+ "á": 157,
+ "â": 158,
+ "ã": 159,
+ "ä": 160,
+ "å": 161,
+ "æ": 162,
+ "ç": 163,
+ "è": 164,
+ "é": 165,
+ "ê": 166,
+ "ë": 167,
+ "ì": 168,
+ "í": 169,
+ "î": 170,
+ "ï": 171,
+ "ð": 172,
+ "ñ": 173,
+ "ò": 174,
+ "ó": 175,
+ "ô": 176,
+ "õ": 177,
+ "ö": 178,
+ "÷": 179,
+ "ø": 180,
+ "ù": 181,
+ "ú": 182,
+ "û": 183,
+ "ü": 184,
+ "ý": 185,
+ "þ": 186,
+ "ÿ": 187,
+ "Ā": 188,
+ "ā": 189,
+ "Ă": 190,
+ "ă": 191,
+ "Ą": 192,
+ "ą": 193,
+ "Ć": 194,
+ "ć": 195,
+ "Ĉ": 196,
+ "ĉ": 197,
+ "Ċ": 198,
+ "ċ": 199,
+ "Č": 200,
+ "č": 201,
+ "Ď": 202,
+ "ď": 203,
+ "Đ": 204,
+ "đ": 205,
+ "Ē": 206,
+ "ē": 207,
+ "Ĕ": 208,
+ "ĕ": 209,
+ "Ė": 210,
+ "ė": 211,
+ "Ę": 212,
+ "ę": 213,
+ "Ě": 214,
+ "ě": 215,
+ "Ĝ": 216,
+ "ĝ": 217,
+ "Ğ": 218,
+ "ğ": 219,
+ "Ġ": 220,
+ "ġ": 221,
+ "Ģ": 222,
+ "ģ": 223,
+ "Ĥ": 224,
+ "ĥ": 225,
+ "Ħ": 226,
+ "ħ": 227,
+ "Ĩ": 228,
+ "ĩ": 229,
+ "Ī": 230,
+ "ī": 231,
+ "Ĭ": 232,
+ "ĭ": 233,
+ "Į": 234,
+ "į": 235,
+ "İ": 236,
+ "ı": 237,
+ "IJ": 238,
+ "ij": 239,
+ "Ĵ": 240,
+ "ĵ": 241,
+ "Ķ": 242,
+ "ķ": 243,
+ "ĸ": 244,
+ "Ĺ": 245,
+ "ĺ": 246,
+ "Ļ": 247,
+ "ļ": 248,
+ "Ľ": 249,
+ "ľ": 250,
+ "Ŀ": 251,
+ "ŀ": 252,
+ "Ł": 253,
+ "ł": 254,
+ "Ń": 255,
+ "ĠĠ": 256,
+ "ĠĠĠĠ": 257,
+ "in": 258,
+ "Ġt": 259,
+ "ĠĠĠĠĠĠĠĠ": 260,
+ "er": 261,
+ "ĠĠĠ": 262,
+ "on": 263,
+ "Ġa": 264,
+ "re": 265,
+ "at": 266,
+ "st": 267,
+ "en": 268,
+ "or": 269,
+ "Ġth": 270,
+ "ĊĊ": 271,
+ "Ġc": 272,
+ "le": 273,
+ "Ġs": 274,
+ "it": 275,
+ "an": 276,
+ "ar": 277,
+ "al": 278,
+ "Ġthe": 279,
+ ";Ċ": 280,
+ "Ġp": 281,
+ "Ġf": 282,
+ "ou": 283,
+ "Ġ=": 284,
+ "is": 285,
+ "ĠĠĠĠĠĠĠ": 286,
+ "ing": 287,
+ "es": 288,
+ "Ġw": 289,
+ "ion": 290,
+ "ed": 291,
+ "ic": 292,
+ "Ġb": 293,
+ "Ġd": 294,
+ "et": 295,
+ "Ġm": 296,
+ "Ġo": 297,
+ "ĉĉ": 298,
+ "ro": 299,
+ "as": 300,
+ "el": 301,
+ "ct": 302,
+ "nd": 303,
+ "Ġin": 304,
+ "Ġh": 305,
+ "ent": 306,
+ "id": 307,
+ "Ġn": 308,
+ "am": 309,
+ "ĠĠĠĠĠĠĠĠĠĠĠ": 310,
+ "Ġto": 311,
+ "Ġre": 312,
+ "--": 313,
+ "Ġ{": 314,
+ "Ġof": 315,
+ "om": 316,
+ ");Ċ": 317,
+ "im": 318,
+ "čĊ": 319,
+ "Ġ(": 320,
+ "il": 321,
+ "//": 322,
+ "Ġand": 323,
+ "ur": 324,
+ "se": 325,
+ "Ġl": 326,
+ "ex": 327,
+ "ĠS": 328,
+ "ad": 329,
+ "Ġ\"": 330,
+ "ch": 331,
+ "ut": 332,
+ "if": 333,
+ "**": 334,
+ "Ġ}": 335,
+ "em": 336,
+ "ol": 337,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 338,
+ "th": 339,
+ ")Ċ": 340,
+ "Ġ{Ċ": 341,
+ "Ġg": 342,
+ "ig": 343,
+ "iv": 344,
+ ",Ċ": 345,
+ "ce": 346,
+ "od": 347,
+ "Ġv": 348,
+ "ate": 349,
+ "ĠT": 350,
+ "ag": 351,
+ "ay": 352,
+ "Ġ*": 353,
+ "ot": 354,
+ "us": 355,
+ "ĠC": 356,
+ "Ġst": 357,
+ "ĠI": 358,
+ "un": 359,
+ "ul": 360,
+ "ue": 361,
+ "ĠA": 362,
+ "ow": 363,
+ "Ġ'": 364,
+ "ew": 365,
+ "Ġ<": 366,
+ "ation": 367,
+ "()": 368,
+ "Ġfor": 369,
+ "ab": 370,
+ "ort": 371,
+ "um": 372,
+ "ame": 373,
+ "Ġis": 374,
+ "pe": 375,
+ "tr": 376,
+ "ck": 377,
+ "âĢ": 378,
+ "Ġy": 379,
+ "ist": 380,
+ "----": 381,
+ ".ĊĊ": 382,
+ "he": 383,
+ "Ġe": 384,
+ "lo": 385,
+ "ĠM": 386,
+ "Ġbe": 387,
+ "ers": 388,
+ "Ġon": 389,
+ "Ġcon": 390,
+ "ap": 391,
+ "ub": 392,
+ "ĠP": 393,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 394,
+ "ass": 395,
+ "int": 396,
+ ">Ċ": 397,
+ "ly": 398,
+ "urn": 399,
+ "Ġ$": 400,
+ ";ĊĊ": 401,
+ "av": 402,
+ "port": 403,
+ "ir": 404,
+ "->": 405,
+ "nt": 406,
+ "ction": 407,
+ "end": 408,
+ "Ġde": 409,
+ "00": 410,
+ "ith": 411,
+ "out": 412,
+ "turn": 413,
+ "our": 414,
+ "ĠĠĠĠĠ": 415,
+ "lic": 416,
+ "res": 417,
+ "pt": 418,
+ "==": 419,
+ "Ġthis": 420,
+ "Ġwh": 421,
+ "Ġif": 422,
+ "ĠD": 423,
+ "ver": 424,
+ "age": 425,
+ "ĠB": 426,
+ "ht": 427,
+ "ext": 428,
+ "=\"": 429,
+ "Ġthat": 430,
+ "****": 431,
+ "ĠR": 432,
+ "Ġit": 433,
+ "ess": 434,
+ "ĠF": 435,
+ "Ġr": 436,
+ "os": 437,
+ "and": 438,
+ "Ġas": 439,
+ "ect": 440,
+ "ke": 441,
+ "rom": 442,
+ "Ġ//": 443,
+ "con": 444,
+ "ĠL": 445,
+ "(\"": 446,
+ "qu": 447,
+ "lass": 448,
+ "Ġwith": 449,
+ "iz": 450,
+ "de": 451,
+ "ĠN": 452,
+ "Ġal": 453,
+ "op": 454,
+ "up": 455,
+ "get": 456,
+ "Ġ}Ċ": 457,
+ "ile": 458,
+ "Ġan": 459,
+ "ata": 460,
+ "ore": 461,
+ "ri": 462,
+ "Ġpro": 463,
+ ";čĊ": 464,
+ "ĉĉĉĉ": 465,
+ "ter": 466,
+ "ain": 467,
+ "ĠW": 468,
+ "ĠE": 469,
+ "Ġcom": 470,
+ "Ġreturn": 471,
+ "art": 472,
+ "ĠH": 473,
+ "ack": 474,
+ "import": 475,
+ "ublic": 476,
+ "Ġor": 477,
+ "est": 478,
+ "ment": 479,
+ "ĠG": 480,
+ "able": 481,
+ "Ġ-": 482,
+ "ine": 483,
+ "ill": 484,
+ "ind": 485,
+ "ere": 486,
+ "::": 487,
+ "ity": 488,
+ "Ġ+": 489,
+ "Ġtr": 490,
+ "elf": 491,
+ "ight": 492,
+ "('": 493,
+ "orm": 494,
+ "ult": 495,
+ "str": 496,
+ "..": 497,
+ "\",": 498,
+ "Ġyou": 499,
+ "ype": 500,
+ "pl": 501,
+ "Ġnew": 502,
+ "Ġj": 503,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 504,
+ "Ġfrom": 505,
+ "Ġex": 506,
+ "ĠO": 507,
+ "20": 508,
+ "ld": 509,
+ "Ġ[": 510,
+ "oc": 511,
+ ":Ċ": 512,
+ "Ġse": 513,
+ "Ġle": 514,
+ "--------": 515,
+ ".s": 516,
+ "{Ċ": 517,
+ "',": 518,
+ "ant": 519,
+ "Ġat": 520,
+ "ase": 521,
+ ".c": 522,
+ "Ġch": 523,
+ "": 524,
+ "ave": 525,
+ "ang": 526,
+ "Ġare": 527,
+ "Ġint": 528,
+ "âĢĻ": 529,
+ "_t": 530,
+ "ert": 531,
+ "ial": 532,
+ "act": 533,
+ "}Ċ": 534,
+ "ive": 535,
+ "ode": 536,
+ "ost": 537,
+ "Ġclass": 538,
+ "Ġnot": 539,
+ "og": 540,
+ "ord": 541,
+ "alue": 542,
+ "all": 543,
+ "ff": 544,
+ "();Ċ": 545,
+ "ont": 546,
+ "ime": 547,
+ "are": 548,
+ "ĠU": 549,
+ "Ġpr": 550,
+ "Ġ:": 551,
+ "ies": 552,
+ "ize": 553,
+ "ure": 554,
+ "Ġby": 555,
+ "ire": 556,
+ "Ġ}ĊĊ": 557,
+ ".p": 558,
+ "Ġsh": 559,
+ "ice": 560,
+ "ast": 561,
+ "ption": 562,
+ "tring": 563,
+ "ok": 564,
+ "__": 565,
+ "cl": 566,
+ "##": 567,
+ "Ġhe": 568,
+ "ard": 569,
+ ").": 570,
+ "Ġ@": 571,
+ "iew": 572,
+ "ĉĉĉ": 573,
+ "Ġwas": 574,
+ "ip": 575,
+ "this": 576,
+ "Ġu": 577,
+ "ĠThe": 578,
+ "ide": 579,
+ "ace": 580,
+ "ib": 581,
+ "ac": 582,
+ "rou": 583,
+ "Ġwe": 584,
+ "ject": 585,
+ "Ġpublic": 586,
+ "ak": 587,
+ "ve": 588,
+ "ath": 589,
+ "oid": 590,
+ "Ġ=>": 591,
+ "ust": 592,
+ "que": 593,
+ "Ġres": 594,
+ "))": 595,
+ "'s": 596,
+ "Ġk": 597,
+ "ans": 598,
+ "yst": 599,
+ "unction": 600,
+ "********": 601,
+ "Ġi": 602,
+ "Ġus": 603,
+ "pp": 604,
+ "10": 605,
+ "one": 606,
+ "ail": 607,
+ "====": 608,
+ "name": 609,
+ "Ġstr": 610,
+ "Ġ/": 611,
+ "Ġ&": 612,
+ "ach": 613,
+ "div": 614,
+ "ystem": 615,
+ "ell": 616,
+ "Ġhave": 617,
+ "err": 618,
+ "ould": 619,
+ "ull": 620,
+ "pon": 621,
+ "ĠJ": 622,
+ "_p": 623,
+ "Ġ==": 624,
+ "ign": 625,
+ "St": 626,
+ ".Ċ": 627,
+ "Ġpl": 628,
+ ");ĊĊ": 629,
+ "form": 630,
+ "put": 631,
+ "ount": 632,
+ "}ĊĊ": 633,
+ "dd": 634,
+ "ite": 635,
+ "Ġget": 636,
+ "rr": 637,
+ "ome": 638,
+ "ĠâĢ": 639,
+ "aram": 640,
+ "cc": 641,
+ "Ġ*/": 642,
+ "ER": 643,
+ "In": 644,
+ "les": 645,
+ "_s": 646,
+ "ong": 647,
+ "ie": 648,
+ "Ġcan": 649,
+ "ĠV": 650,
+ "erv": 651,
+ "pr": 652,
+ "Ġun": 653,
+ "row": 654,
+ "ber": 655,
+ "Ġdo": 656,
+ "ll": 657,
+ "Ġel": 658,
+ "Ġself": 659,
+ "ated": 660,
+ "ary": 661,
+ "Ġ.": 662,
+ "']": 663,
+ "ud": 664,
+ "Ġen": 665,
+ "ĠTh": 666,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 667,
+ "te": 668,
+ "_c": 669,
+ "uct": 670,
+ "Ġab": 671,
+ "ork": 672,
+ ".get": 673,
+ "Ġ#": 674,
+ "aw": 675,
+ "ress": 676,
+ "ob": 677,
+ "Name": 678,
+ "201": 679,
+ "app": 680,
+ "['": 681,
+ "Ġall": 682,
+ "ory": 683,
+ "ition": 684,
+ "ance": 685,
+ "ear": 686,
+ "Ġcont": 687,
+ "vent": 688,
+ "ia": 689,
+ "Ġwill": 690,
+ "IN": 691,
+ "ĠĠĠĠĠĠĠĠĠ": 692,
+ "return": 693,
+ "Ġ": 694,
+ "data": 695,
+ ")ĊĊ": 696,
+ "Re": 697,
+ "ple": 698,
+ "ild": 699,
+ "ther": 700,
+ "Ġyour": 701,
+ "\"Ċ": 702,
+ "($": 703,
+ "Ġout": 704,
+ "),": 705,
+ "Ġhas": 706,
+ "String": 707,
+ "so": 708,
+ "Ġup": 709,
+ "ax": 710,
+ "Ġdef": 711,
+ "Ġbo": 712,
+ "ge": 713,
+ "alse": 714,
+ "ON": 715,
+ "per": 716,
+ "12": 717,
+ "ich": 718,
+ "Ġbut": 719,
+ "ĠĊ": 720,
+ "Ġ_": 721,
+ "_m": 722,
+ "add": 723,
+ "quest": 724,
+ "odel": 725,
+ "self": 726,
+ "ery": 727,
+ "ft": 728,
+ "ens": 729,
+ "////": 730,
+ "ake": 731,
+ ".C": 732,
+ "Ġgo": 733,
+ "Ġfunction": 734,
+ "ĠK": 735,
+ "ivate": 736,
+ "Ġim": 737,
+ "Ġconst": 738,
+ ".t": 739,
+ "Ġ*/Ċ": 740,
+ ");čĊ": 741,
+ "Ġvoid": 742,
+ "Ġset": 743,
+ "ĠSystem": 744,
+ "cri": 745,
+ "()Ċ": 746,
+ "li": 747,
+ "ĉif": 748,
+ ".m": 749,
+ "ally": 750,
+ "set": 751,
+ "ep": 752,
+ "âĢĻs": 753,
+ "bo": 754,
+ "def": 755,
+ "',Ċ": 756,
+ "Ġme": 757,
+ "Ġ!": 758,
+ "atch": 759,
+ "\">": 760,
+ "\",Ċ": 761,
+ "ec": 762,
+ "ĠIn": 763,
+ "ph": 764,
+ "Ġ|": 765,
+ "_f": 766,
+ "Ġvar": 767,
+ "ence": 768,
+ "Id": 769,
+ "ree": 770,
+ "ink": 771,
+ "lect": 772,
+ "ug": 773,
+ "eth": 774,
+ "Ġelse": 775,
+ "----------------": 776,
+ "19": 777,
+ "cont": 778,
+ "Ġso": 779,
+ "atic": 780,
+ "Ġlo": 781,
+ "pro": 782,
+ "ton": 783,
+ "ss": 784,
+ "own": 785,
+ "abel": 786,
+ "oint": 787,
+ "ous": 788,
+ "eld": 789,
+ "ST": 790,
+ "The": 791,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 792,
+ "RE": 793,
+ "\":": 794,
+ "olor": 795,
+ "tp": 796,
+ "eg": 797,
+ "key": 798,
+ "ude": 799,
+ "ĠSt": 800,
+ "ound": 801,
+ "Ġar": 802,
+ "\");Ċ": 803,
+ "ener": 804,
+ "ser": 805,
+ "11": 806,
+ "bject": 807,
+ "essage": 808,
+ "fer": 809,
+ "Ġmore": 810,
+ "ations": 811,
+ "ents": 812,
+ "Ġhis": 813,
+ "Ġthey": 814,
+ ".S": 815,
+ "ĠY": 816,
+ "use": 817,
+ "ne": 818,
+ "ish": 819,
+ "old": 820,
+ "_d": 821,
+ "io": 822,
+ "ield": 823,
+ "Ġper": 824,
+ "Cont": 825,
+ "ings": 826,
+ "####": 827,
+ "Ġdata": 828,
+ "Ġsa": 829,
+ "ef": 830,
+ "fo": 831,
+ "Ġone": 832,
+ "eng": 833,
+ "Ġdis": 834,
+ "AT": 835,
+ "Ġname": 836,
+ "Ġtrue": 837,
+ "val": 838,
+ "led": 839,
+ ".f": 840,
+ "Ġne": 841,
+ "Ġend": 842,
+ "32": 843,
+ ".T": 844,
+ "16": 845,
+ "cre": 846,
+ "ark": 847,
+ "log": 848,
+ "Ex": 849,
+ "error": 850,
+ "_id": 851,
+ "urre": 852,
+ "ange": 853,
+ "Ġnull": 854,
+ "rray": 855,
+ "Ġmy": 856,
+ "pan": 857,
+ "ict": 858,
+ "ator": 859,
+ "View": 860,
+ "List": 861,
+ "ĉreturn": 862,
+ "âĢĿ": 863,
+ "Ġpre": 864,
+ "Ġx": 865,
+ "clude": 866,
+ "arg": 867,
+ "15": 868,
+ "ov": 869,
+ ".h": 870,
+ "Ġ>": 871,
+ "Ġtheir": 872,
+ "')": 873,
+ "irst": 874,
+ "ick": 875,
+ "gh": 876,
+ "LE": 877,
+ "OR": 878,
+ "Ġprivate": 879,
+ "tem": 880,
+ "čĊčĊ": 881,
+ "user": 882,
+ "Ġ)": 883,
+ "com": 884,
+ ".A": 885,
+ "\";Ċ": 886,
+ "Ġid": 887,
+ "read": 888,
+ "Ġwho": 889,
+ "_b": 890,
+ "\">Ċ": 891,
+ "Ġtime": 892,
+ "Ġman": 893,
+ "ry": 894,
+ "========": 895,
+ "roup": 896,
+ "rop": 897,
+ "public": 898,
+ "vel": 899,
+ "umber": 900,
+ "ble": 901,
+ "Ġwhich": 902,
+ "****************": 903,
+ "Ġany": 904,
+ "Ġfalse": 905,
+ "we": 906,
+ "Ġvalue": 907,
+ "Ġli": 908,
+ "\")": 909,
+ "nder": 910,
+ "gr": 911,
+ "Ġno": 912,
+ "param": 913,
+ "25": 914,
+ "fig": 915,
+ ".com": 916,
+ "Ġapp": 917,
+ "_l": 918,
+ "ions": 919,
+ ".D": 920,
+ "ĠCh": 921,
+ "Ġabout": 922,
+ "Ġadd": 923,
+ "Ġsu": 924,
+ "Ġstring": 925,
+ "ID": 926,
+ "Ġover": 927,
+ "string": 928,
+ ".l": 929,
+ "ource": 930,
+ "000": 931,
+ "_C": 932,
+ "]Ċ": 933,
+ "Ġqu": 934,
+ "ĠString": 935,
+ "ca": 936,
+ "SE": 937,
+ "Ġro": 938,
+ "sh": 939,
+ "ual": 940,
+ "Type": 941,
+ "son": 942,
+ "new": 943,
+ "ern": 944,
+ "Ġag": 945,
+ "AR": 946,
+ "];Ċ": 947,
+ "].": 948,
+ "Ġ?": 949,
+ "ical": 950,
+ "Ġdes": 951,
+ "uth": 952,
+ "ix": 953,
+ "ays": 954,
+ "Ġtype": 955,
+ "'t": 956,
+ "ault": 957,
+ "Ġinter": 958,
+ "var": 959,
+ ".b": 960,
+ "Ġpart": 961,
+ ".d": 962,
+ "urrent": 963,
+ "IT": 964,
+ "EN": 965,
+ "30": 966,
+ "enc": 967,
+ "(f": 968,
+ "ra": 969,
+ "value": 970,
+ "cho": 971,
+ "18": 972,
+ "utton": 973,
+ "ose": 974,
+ "14": 975,
+ "Ġ!=": 976,
+ "ater": 977,
+ "é": 978,
+ "reate": 979,
+ "oll": 980,
+ "pos": 981,
+ "yle": 982,
+ "ng": 983,
+ "AL": 984,
+ "using": 985,
+ "ames": 986,
+ "Ġ{čĊ": 987,
+ "ates": 988,
+ "ely": 989,
+ "Ġwork": 990,
+ "Ġem": 991,
+ "inal": 992,
+ "Ġsp": 993,
+ "Ġwhen": 994,
+ ".set": 995,
+ "ĠĠĠĠĠĠ": 996,
+ "):Ċ": 997,
+ "to": 998,
+ "quire": 999,
+ "indow": 1000,
+ "lement": 1001,
+ "pect": 1002,
+ "ash": 1003,
+ "[i": 1004,
+ "Ġuse": 1005,
+ ".F": 1006,
+ "pec": 1007,
+ "Ġad": 1008,
+ "ove": 1009,
+ "ception": 1010,
+ "ength": 1011,
+ "include": 1012,
+ "ader": 1013,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 1014,
+ "atus": 1015,
+ "Th": 1016,
+ "itle": 1017,
+ "rit": 1018,
+ "void": 1019,
+ "().": 1020,
+ "(Ċ": 1021,
+ "Ġoff": 1022,
+ "Ġother": 1023,
+ "Ġ&&": 1024,
+ "';Ċ": 1025,
+ "ms": 1026,
+ "Ġbeen": 1027,
+ "Ġte": 1028,
+ "ml": 1029,
+ "co": 1030,
+ "nc": 1031,
+ "13": 1032,
+ "ervice": 1033,
+ "Ġ%": 1034,
+ "**Ċ": 1035,
+ "ann": 1036,
+ "ade": 1037,
+ "ĊĊĊĊ": 1038,
+ "lock": 1039,
+ "const": 1040,
+ "100": 1041,
+ "ponse": 1042,
+ "Ġsup": 1043,
+ "++": 1044,
+ "date": 1045,
+ "Ġacc": 1046,
+ "Ġhad": 1047,
+ "Ġbu": 1048,
+ "200": 1049,
+ "ĠRe": 1050,
+ "Ġwere": 1051,
+ "Ġfile": 1052,
+ "Ġwould": 1053,
+ "ĠâĢľ": 1054,
+ "ven": 1055,
+ "iss": 1056,
+ "Ġour": 1057,
+ "class": 1058,
+ "raw": 1059,
+ "Ġyear": 1060,
+ "Data": 1061,
+ "Ġval": 1062,
+ "Ġsome": 1063,
+ "fter": 1064,
+ "ys": 1065,
+ "Ġ///": 1066,
+ "round": 1067,
+ "view": 1068,
+ "Ġpe": 1069,
+ "Ġthere": 1070,
+ "Ġsaid": 1071,
+ "du": 1072,
+ "of": 1073,
+ "line": 1074,
+ "/*": 1075,
+ "duct": 1076,
+ "Ġher": 1077,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠ": 1078,
+ "Res": 1079,
+ "Ġco": 1080,
+ "Ġcomm": 1081,
+ "ise": 1082,
+ "min": 1083,
+ "ĠĠĠĠĊ": 1084,
+ "#include": 1085,
+ "ethod": 1086,
+ ".P": 1087,
+ "ute": 1088,
+ "Ġass": 1089,
+ "Int": 1090,
+ "ask": 1091,
+ "loc": 1092,
+ "Ġlike": 1093,
+ "ody": 1094,
+ "Ġlet": 1095,
+ "load": 1096,
+ "Ġam": 1097,
+ "rol": 1098,
+ "Ġgr": 1099,
+ "yp": 1100,
+ "Ġalso": 1101,
+ "ĠIt": 1102,
+ "url": 1103,
+ "ific": 1104,
+ "ors": 1105,
+ "_P": 1106,
+ "_n": 1107,
+ "igh": 1108,
+ "Ġthan": 1109,
+ "Com": 1110,
+ "AN": 1111,
+ "UL": 1112,
+ "ating": 1113,
+ "17": 1114,
+ "ĠThis": 1115,
+ "ref": 1116,
+ "_S": 1117,
+ "Ġstatic": 1118,
+ "roll": 1119,
+ "Ġjust": 1120,
+ "Ġresult": 1121,
+ "ian": 1122,
+ "idth": 1123,
+ "Ġthem": 1124,
+ "));Ċ": 1125,
+ "der": 1126,
+ "reak": 1127,
+ "Con": 1128,
+ "://": 1129,
+ "ule": 1130,
+ "...": 1131,
+ "arch": 1132,
+ "ement": 1133,
+ "Ġ<<": 1134,
+ "50": 1135,
+ "ush": 1136,
+ "ense": 1137,
+ "arr": 1138,
+ "Ġinto": 1139,
+ "cess": 1140,
+ "amp": 1141,
+ "ied": 1142,
+ "ument": 1143,
+ "Ġ\\": 1144,
+ "],": 1145,
+ "wo": 1146,
+ "als": 1147,
+ "Ġwhat": 1148,
+ "anc": 1149,
+ "Value": 1150,
+ "='": 1151,
+ "olum": 1152,
+ "Ġpos": 1153,
+ "ages": 1154,
+ "ayer": 1155,
+ "Ġsc": 1156,
+ "ues": 1157,
+ "\")Ċ": 1158,
+ "_T": 1159,
+ "Ġlist": 1160,
+ "(s": 1161,
+ "Ġcase": 1162,
+ "Ch": 1163,
+ "ĉĉĉĉĉ": 1164,
+ "////////": 1165,
+ "ponent": 1166,
+ "Ġz": 1167,
+ "Ġkn": 1168,
+ "let": 1169,
+ "DE": 1170,
+ "red": 1171,
+ "Ġfe": 1172,
+ "Ġ},Ċ": 1173,
+ "Ġ,": 1174,
+ "(t": 1175,
+ "Ġfirst": 1176,
+ "');Ċ": 1177,
+ "word": 1178,
+ "Ġimport": 1179,
+ "Ġact": 1180,
+ "Ġchar": 1181,
+ "CT": 1182,
+ "ĠTr": 1183,
+ "ople": 1184,
+ "={": 1185,
+ "ĉf": 1186,
+ "24": 1187,
+ "ient": 1188,
+ "cent": 1189,
+ ".j": 1190,
+ "lection": 1191,
+ "))Ċ": 1192,
+ "Ġonly": 1193,
+ "Ġprint": 1194,
+ "mer": 1195,
+ ".W": 1196,
+ "ock": 1197,
+ "Ġ--": 1198,
+ "Text": 1199,
+ "Ġop": 1200,
+ "ank": 1201,
+ "Ġits": 1202,
+ "Ġback": 1203,
+ "[\"": 1204,
+ "Ġneed": 1205,
+ "Ġcl": 1206,
+ "Ġsub": 1207,
+ "Ġla": 1208,
+ "((": 1209,
+ ".\"": 1210,
+ "Object": 1211,
+ "Ġstart": 1212,
+ "file": 1213,
+ "(self": 1214,
+ "ner": 1215,
+ "ey": 1216,
+ "Ġuser": 1217,
+ "Ġent": 1218,
+ "ĠCom": 1219,
+ "its": 1220,
+ "ĠCon": 1221,
+ "ouble": 1222,
+ "ower": 1223,
+ "item": 1224,
+ "very": 1225,
+ "ĠWe": 1226,
+ "64": 1227,
+ "lick": 1228,
+ "ĠQ": 1229,
+ "php": 1230,
+ "ttp": 1231,
+ "':": 1232,
+ "ics": 1233,
+ "Ġunder": 1234,
+ "Ġ*Ċ": 1235,
+ ".L": 1236,
+ ");": 1237,
+ "ices": 1238,
+ "Ġreg": 1239,
+ ")čĊ": 1240,
+ "ĉpublic": 1241,
+ "SS": 1242,
+ "Ġthen": 1243,
+ "reat": 1244,
+ "ious": 1245,
+ ".G": 1246,
+ "ek": 1247,
+ "irect": 1248,
+ "heck": 1249,
+ "cript": 1250,
+ "ning": 1251,
+ "ĠUn": 1252,
+ "Ġmay": 1253,
+ "ĠWh": 1254,
+ "Bo": 1255,
+ "Item": 1256,
+ "struct": 1257,
+ ".st": 1258,
+ "ream": 1259,
+ "ible": 1260,
+ "loat": 1261,
+ "Ġorg": 1262,
+ "und": 1263,
+ "sum": 1264,
+ "_in": 1265,
+ "../": 1266,
+ "_M": 1267,
+ "Ġhow": 1268,
+ "rite": 1269,
+ "'Ċ": 1270,
+ "To": 1271,
+ "40": 1272,
+ "ww": 1273,
+ "Ġpeople": 1274,
+ "index": 1275,
+ ".n": 1276,
+ "http": 1277,
+ "(m": 1278,
+ "ector": 1279,
+ "Ġind": 1280,
+ "Ġjav": 1281,
+ "],Ċ": 1282,
+ "ĠHe": 1283,
+ "_st": 1284,
+ "ful": 1285,
+ "ole": 1286,
+ "){Ċ": 1287,
+ "Ġshould": 1288,
+ "opy": 1289,
+ "elp": 1290,
+ "ier": 1291,
+ "_name": 1292,
+ "erson": 1293,
+ "ION": 1294,
+ "ote": 1295,
+ "Ġtest": 1296,
+ "Ġbet": 1297,
+ "rror": 1298,
+ "ular": 1299,
+ "ãĢ": 1300,
+ "ĠÐ": 1301,
+ "bs": 1302,
+ "ting": 1303,
+ "Ġmake": 1304,
+ "Tr": 1305,
+ "Ġafter": 1306,
+ "arget": 1307,
+ "RO": 1308,
+ "olumn": 1309,
+ "rc": 1310,
+ "_re": 1311,
+ "define": 1312,
+ "22": 1313,
+ "Ġright": 1314,
+ "right": 1315,
+ "day": 1316,
+ "Ġlong": 1317,
+ "[]": 1318,
+ "(p": 1319,
+ "td": 1320,
+ "cond": 1321,
+ "ĠPro": 1322,
+ "Ġrem": 1323,
+ "ptions": 1324,
+ "vid": 1325,
+ ".g": 1326,
+ "Ġext": 1327,
+ "Ġ__": 1328,
+ "')Ċ": 1329,
+ "pace": 1330,
+ "mp": 1331,
+ "Ġmin": 1332,
+ "stance": 1333,
+ "air": 1334,
+ "action": 1335,
+ "wh": 1336,
+ "type": 1337,
+ "util": 1338,
+ "ait": 1339,
+ "": 1340,
+ "IC": 1341,
+ "text": 1342,
+ "Ġph": 1343,
+ "Ġfl": 1344,
+ ".M": 1345,
+ "ccess": 1346,
+ "br": 1347,
+ "fore": 1348,
+ "ersion": 1349,
+ "),Ċ": 1350,
+ ".re": 1351,
+ "ateg": 1352,
+ "Ġloc": 1353,
+ "ins": 1354,
+ "-s": 1355,
+ "trib": 1356,
+ "ĠInt": 1357,
+ "Ġarray": 1358,
+ ",\"": 1359,
+ "Pro": 1360,
+ "(c": 1361,
+ "ession": 1362,
+ ">ĊĊ": 1363,
+ "Ġshe": 1364,
+ "\"]": 1365,
+ "aph": 1366,
+ "Ġexp": 1367,
+ "erty": 1368,
+ "ĠSe": 1369,
+ "Ġpar": 1370,
+ "unc": 1371,
+ "ET": 1372,
+ "Ġread": 1373,
+ "print": 1374,
+ "Ġrel": 1375,
+ "Ġform": 1376,
+ "Ġdr": 1377,
+ "Exception": 1378,
+ "input": 1379,
+ "Ġtrans": 1380,
+ "########": 1381,
+ "order": 1382,
+ "By": 1383,
+ "Ġaw": 1384,
+ "ities": 1385,
+ "uff": 1386,
+ "play": 1387,
+ ".add": 1388,
+ "ĠâĢĵ": 1389,
+ "Ġwant": 1390,
+ "Ġcomp": 1391,
+ "ments": 1392,
+ "Ġ||": 1393,
+ "az": 1394,
+ "be": 1395,
+ "Ġnumber": 1396,
+ "Ġrequire": 1397,
+ "ĠEx": 1398,
+ "60": 1399,
+ "Ġcol": 1400,
+ "Ġkey": 1401,
+ "ember": 1402,
+ "Ġtwo": 1403,
+ "Ġsize": 1404,
+ "Ġwhere": 1405,
+ "UT": 1406,
+ "result": 1407,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 1408,
+ "ough": 1409,
+ "orld": 1410,
+ "ood": 1411,
+ "uch": 1412,
+ "ative": 1413,
+ "ger": 1414,
+ "arent": 1415,
+ "Ġ/*": 1416,
+ "Ġarg": 1417,
+ "Ġwhile": 1418,
+ "23": 1419,
+ "(this": 1420,
+ "Ġrec": 1421,
+ "Ġdif": 1422,
+ "State": 1423,
+ "Ġspec": 1424,
+ "ride": 1425,
+ "_F": 1426,
+ "Ġlook": 1427,
+ "AM": 1428,
+ "ility": 1429,
+ "eter": 1430,
+ "âĢĻt": 1431,
+ "ĊĊĊ": 1432,
+ "ayout": 1433,
+ "--------------------------------": 1434,
+ "ager": 1435,
+ "Ġcould": 1436,
+ "Ġbr": 1437,
+ "ends": 1438,
+ "ures": 1439,
+ "Ġknow": 1440,
+ "ets": 1441,
+ "ĠIf": 1442,
+ "ĠSh": 1443,
+ ".w": 1444,
+ "back": 1445,
+ "Ġser": 1446,
+ "Ġ+=": 1447,
+ "Ġfr": 1448,
+ "());Ċ": 1449,
+ "Ġhand": 1450,
+ "Ind": 1451,
+ "ULL": 1452,
+ "Im": 1453,
+ "();ĊĊ": 1454,
+ "Ġmost": 1455,
+ "Ġtry": 1456,
+ "Ġnow": 1457,
+ "rough": 1458,
+ ">čĊ": 1459,
+ "ackage": 1460,
+ "Ġhim": 1461,
+ "._": 1462,
+ "ify": 1463,
+ "Ġbreak": 1464,
+ "Ġ);Ċ": 1465,
+ "ren": 1466,
+ "#define": 1467,
+ "itt": 1468,
+ "Ġap": 1469,
+ "ĉc": 1470,
+ "(n": 1471,
+ "ĠYou": 1472,
+ ":ĊĊ": 1473,
+ "-m": 1474,
+ "Ġevery": 1475,
+ "ustom": 1476,
+ "lient": 1477,
+ "ocument": 1478,
+ "cription": 1479,
+ "Error": 1480,
+ "-b": 1481,
+ "о": 1482,
+ "][": 1483,
+ "99": 1484,
+ "trans": 1485,
+ "Ġpoint": 1486,
+ "Ġstd": 1487,
+ "Ġfil": 1488,
+ "Time": 1489,
+ "80": 1490,
+ "Ġmod": 1491,
+ "Ġ->": 1492,
+ "Ġerror": 1493,
+ "ah": 1494,
+ "Ġtext": 1495,
+ "roller": 1496,
+ "lose": 1497,
+ "ql": 1498,
+ "Ġpol": 1499,
+ ">": 1500,
+ "Ġshow": 1501,
+ "User": 1502,
+ "ased": 1503,
+ "Ġ{ĊĊ": 1504,
+ "Ġfind": 1505,
+ "а": 1506,
+ "ED": 1507,
+ "span": 1508,
+ "enu": 1509,
+ "Ġcurrent": 1510,
+ "Ġused": 1511,
+ "cept": 1512,
+ "clud": 1513,
+ "Ġplay": 1514,
+ "Ġlog": 1515,
+ "ution": 1516,
+ "fl": 1517,
+ "Ġsee": 1518,
+ "indows": 1519,
+ "Ġhelp": 1520,
+ "Ġthese": 1521,
+ "Ġpass": 1522,
+ "Ġdown": 1523,
+ "Ġeven": 1524,
+ "ason": 1525,
+ "uild": 1526,
+ "from": 1527,
+ "(d": 1528,
+ "Ġbl": 1529,
+ "label": 1530,
+ "else": 1531,
+ "е": 1532,
+ "Ġ(!": 1533,
+ "ized": 1534,
+ "(),": 1535,
+ "Ġob": 1536,
+ "Ġitem": 1537,
+ "ump": 1538,
+ "UR": 1539,
+ "orn": 1540,
+ "Ġdon": 1541,
+ "Se": 1542,
+ "man": 1543,
+ "27": 1544,
+ "ample": 1545,
+ "tn": 1546,
+ "================": 1547,
+ "He": 1548,
+ "gram": 1549,
+ "Ġdid": 1550,
+ "wn": 1551,
+ "_h": 1552,
+ "iver": 1553,
+ "Ġsm": 1554,
+ "Ġthrough": 1555,
+ "ĠAn": 1556,
+ "che": 1557,
+ "Ġinv": 1558,
+ "ouse": 1559,
+ "Ġes": 1560,
+ "ĠNew": 1561,
+ "export": 1562,
+ "mary": 1563,
+ "uto": 1564,
+ "ler": 1565,
+ "Ġlast": 1566,
+ "Ġevent": 1567,
+ "try": 1568,
+ "ï¼": 1569,
+ "ily": 1570,
+ "igned": 1571,
+ "ines": 1572,
+ "ollow": 1573,
+ "icense": 1574,
+ "sole": 1575,
+ "lear": 1576,
+ "(int": 1577,
+ "Ġagain": 1578,
+ "Ġhigh": 1579,
+ "html": 1580,
+ "Index": 1581,
+ "uthor": 1582,
+ "Ġ/**Ċ": 1583,
+ "Ġline": 1584,
+ "Event": 1585,
+ "_D": 1586,
+ "Ġdoes": 1587,
+ "itial": 1588,
+ "Ġcr": 1589,
+ "ars": 1590,
+ "28": 1591,
+ "Ġtem": 1592,
+ "cause": 1593,
+ "face": 1594,
+ "Ġ`": 1595,
+ "_A": 1596,
+ "Button": 1597,
+ "ature": 1598,
+ "ected": 1599,
+ "ES": 1600,
+ "ister": 1601,
+ "ĉĊ": 1602,
+ "Ġbefore": 1603,
+ "ale": 1604,
+ "other": 1605,
+ "Ġbecause": 1606,
+ "roid": 1607,
+ "Ġed": 1608,
+ "ik": 1609,
+ "reg": 1610,
+ "ĠDe": 1611,
+ "Ġdist": 1612,
+ "},Ċ": 1613,
+ "Ġstate": 1614,
+ "Ġcons": 1615,
+ "rint": 1616,
+ "att": 1617,
+ "Ġhere": 1618,
+ "ined": 1619,
+ "Ġfinal": 1620,
+ "Ġ\"\"": 1621,
+ "Key": 1622,
+ "LO": 1623,
+ "Ġdel": 1624,
+ "pty": 1625,
+ "thing": 1626,
+ "26": 1627,
+ "ĠAnd": 1628,
+ "Ġrun": 1629,
+ "ĠX": 1630,
+ "ym": 1631,
+ ".app": 1632,
+ "Ġvery": 1633,
+ "ces": 1634,
+ "_N": 1635,
+ "ared": 1636,
+ "ward": 1637,
+ "list": 1638,
+ "ited": 1639,
+ "olog": 1640,
+ "itch": 1641,
+ "Box": 1642,
+ "ife": 1643,
+ "33": 1644,
+ "Ġac": 1645,
+ "Ġmodel": 1646,
+ "Ġmon": 1647,
+ "Ġway": 1648,
+ "lete": 1649,
+ "Ġcall": 1650,
+ "Ġatt": 1651,
+ "Ġcal": 1652,
+ "vert": 1653,
+ "Ġdec": 1654,
+ "lease": 1655,
+ "oun": 1656,
+ "Ġ});Ċ": 1657,
+ "fr": 1658,
+ "formation": 1659,
+ "etail": 1660,
+ "Ġnum": 1661,
+ "aj": 1662,
+ "query": 1663,
+ "Ġwell": 1664,
+ "Ġobject": 1665,
+ "ĠAs": 1666,
+ "Ġyears": 1667,
+ "Color": 1668,
+ "IS": 1669,
+ "Ġdefault": 1670,
+ "Wh": 1671,
+ "Ġins": 1672,
+ "aint": 1673,
+ "Ġjava": 1674,
+ "Ġsim": 1675,
+ "ĠAr": 1676,
+ "mon": 1677,
+ "til": 1678,
+ "();čĊ": 1679,
+ "):": 1680,
+ "Set": 1681,
+ "29": 1682,
+ "atter": 1683,
+ "Ġview": 1684,
+ "Ġpres": 1685,
+ "array": 1686,
+ "We": 1687,
+ "At": 1688,
+ "Ġbel": 1689,
+ "Ġmany": 1690,
+ "21": 1691,
+ "Man": 1692,
+ "ender": 1693,
+ "Ġbeing": 1694,
+ "Ġgood": 1695,
+ "ĉĉĉĉĉĉ": 1696,
+ "ational": 1697,
+ "ware": 1698,
+ ".log": 1699,
+ "{čĊ": 1700,
+ "Ġusing": 1701,
+ "_B": 1702,
+ "Ġ:=": 1703,
+ "_w": 1704,
+ "ists": 1705,
+ "lish": 1706,
+ "Ġstud": 1707,
+ "ĠAl": 1708,
+ "Ġgu": 1709,
+ "config": 1710,
+ "uring": 1711,
+ "time": 1712,
+ "oken": 1713,
+ "amespace": 1714,
+ "Ġrequest": 1715,
+ "Ġchild": 1716,
+ "ĠÃ": 1717,
+ "lob": 1718,
+ "Ġparam": 1719,
+ "Ġ}čĊ": 1720,
+ "01": 1721,
+ "Ġecho": 1722,
+ "function": 1723,
+ "********************************": 1724,
+ "ps": 1725,
+ "Element": 1726,
+ "alk": 1727,
+ "lication": 1728,
+ "by": 1729,
+ "Size": 1730,
+ "rawing": 1731,
+ "Ġperson": 1732,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 1733,
+ "\\n": 1734,
+ "object": 1735,
+ "ince": 1736,
+ "En": 1737,
+ "File": 1738,
+ "uf": 1739,
+ "ffect": 1740,
+ "AC": 1741,
+ "Ġstyle": 1742,
+ "summary": 1743,
+ "Ġque": 1744,
+ "_r": 1745,
+ "Ġ($": 1746,
+ "Model": 1747,
+ "ident": 1748,
+ "Ġmethod": 1749,
+ "IL": 1750,
+ "ott": 1751,
+ "less": 1752,
+ "ING": 1753,
+ "Ġ()": 1754,
+ "Ġexpect": 1755,
+ "ync": 1756,
+ "package": 1757,
+ "35": 1758,
+ "urs": 1759,
+ "Ġprot": 1760,
+ "./": 1761,
+ "pre": 1762,
+ "Ġ)Ċ": 1763,
+ "ma": 1764,
+ "Ġsur": 1765,
+ "Ġfound": 1766,
+ "Info": 1767,
+ "par": 1768,
+ "imes": 1769,
+ ".e": 1770,
+ "ains": 1771,
+ "Ġpost": 1772,
+ "-d": 1773,
+ "45": 1774,
+ "olean": 1775,
+ "Ġsl": 1776,
+ "PE": 1777,
+ "Ġsuch": 1778,
+ "select": 1779,
+ "ainer": 1780,
+ "Ġthink": 1781,
+ "Ġdiffer": 1782,
+ ".r": 1783,
+ "/**Ċ": 1784,
+ "FF": 1785,
+ "ool": 1786,
+ "plate": 1787,
+ "qual": 1788,
+ "ĠFor": 1789,
+ "Ġmuch": 1790,
+ "uc": 1791,
+ "(new": 1792,
+ "odule": 1793,
+ "Ġsom": 1794,
+ "Ġhttp": 1795,
+ "ĠList": 1796,
+ "Ġcount": 1797,
+ "Ġinst": 1798,
+ "char": 1799,
+ "mit": 1800,
+ ".id": 1801,
+ "aking": 1802,
+ "Ġgener": 1803,
+ "px": 1804,
+ "vice": 1805,
+ "37": 1806,
+ "_data": 1807,
+ "ĠNULL": 1808,
+ "}čĊ": 1809,
+ "idd": 1810,
+ "ãĢĤ": 1811,
+ "Ġmed": 1812,
+ "org": 1813,
+ "ider": 1814,
+ "ache": 1815,
+ "work": 1816,
+ "Ġcheck": 1817,
+ "ween": 1818,
+ "Ġ((": 1819,
+ "the": 1820,
+ "ants": 1821,
+ "><": 1822,
+ ".B": 1823,
+ "-c": 1824,
+ "Ġopen": 1825,
+ "Ġest": 1826,
+ "ĠĠĠĠĠĠĠĠĊ": 1827,
+ "Ġnext": 1828,
+ "IM": 1829,
+ "ÑĤ": 1830,
+ "OT": 1831,
+ "ó": 1832,
+ "Ġfollow": 1833,
+ "content": 1834,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠ": 1835,
+ "Ġinclud": 1836,
+ "HE": 1837,
+ "ĠRes": 1838,
+ "Ġhref": 1839,
+ "и": 1840,
+ "Ġcar": 1841,
+ "ypes": 1842,
+ "image": 1843,
+ "Un": 1844,
+ "Ġbool": 1845,
+ "AD": 1846,
+ "Ġgame": 1847,
+ ".Form": 1848,
+ "rows": 1849,
+ "*/": 1850,
+ "velop": 1851,
+ ".Drawing": 1852,
+ "Ġpath": 1853,
+ "ision": 1854,
+ "Ġeach": 1855,
+ "ĠPl": 1856,
+ "_type": 1857,
+ "Path": 1858,
+ "nection": 1859,
+ "Ġav": 1860,
+ "').": 1861,
+ "Ġsupport": 1862,
+ "ENT": 1863,
+ "rem": 1864,
+ "\").": 1865,
+ "Ġown": 1866,
+ "Ġcor": 1867,
+ "count": 1868,
+ "miss": 1869,
+ "ually": 1870,
+ "Ġmem": 1871,
+ "std": 1872,
+ "ience": 1873,
+ "search": 1874,
+ "\"ĊĊ": 1875,
+ "Form": 1876,
+ "Ġsex": 1877,
+ "ename": 1878,
+ "Ġsign": 1879,
+ "Ġet": 1880,
+ "ĠĠĠĠĠĠĠĠĠĠ": 1881,
+ "','": 1882,
+ "ĠApp": 1883,
+ "Ġthose": 1884,
+ "off": 1885,
+ "Ġerr": 1886,
+ "Ġsystem": 1887,
+ "Ġbest": 1888,
+ "code": 1889,
+ "Ġsame": 1890,
+ "Ġdi": 1891,
+ "uss": 1892,
+ "Ġcreate": 1893,
+ "ather": 1894,
+ "Array": 1895,
+ ".in": 1896,
+ "fe": 1897,
+ "Service": 1898,
+ "UN": 1899,
+ "ats": 1900,
+ "ĠZ": 1901,
+ "alth": 1902,
+ "Ġmade": 1903,
+ "true": 1904,
+ "AB": 1905,
+ "Ġmark": 1906,
+ "rid": 1907,
+ "ified": 1908,
+ ",čĊ": 1909,
+ "yn": 1910,
+ "press": 1911,
+ "Ġgroup": 1912,
+ "Ġfin": 1913,
+ "ĠLicense": 1914,
+ "Field": 1915,
+ "eger": 1916,
+ "Ġworld": 1917,
+ "iness": 1918,
+ "ty": 1919,
+ "Ġprocess": 1920,
+ "(b": 1921,
+ "Ġcre": 1922,
+ "arn": 1923,
+ "ives": 1924,
+ "Ġmain": 1925,
+ "ideo": 1926,
+ "36": 1927,
+ "_g": 1928,
+ "AG": 1929,
+ "valid": 1930,
+ "img": 1931,
+ "PI": 1932,
+ "Ġcolor": 1933,
+ "Ġreport": 1934,
+ "Ġtake": 1935,
+ "rib": 1936,
+ "OM": 1937,
+ "Ġday": 1938,
+ "Request": 1939,
+ "Ġsk": 1940,
+ "bers": 1941,
+ "ĉs": 1942,
+ ".Add": 1943,
+ "oot": 1944,
+ "Image": 1945,
+ "Ġcomple": 1946,
+ "ollection": 1947,
+ "Ġtop": 1948,
+ "Ġfree": 1949,
+ "AS": 1950,
+ "De": 1951,
+ "ĠOn": 1952,
+ "IG": 1953,
+ "90": 1954,
+ "eta": 1955,
+ "Date": 1956,
+ "Ġaction": 1957,
+ "34": 1958,
+ "Over": 1959,
+ "itor": 1960,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 1961,
+ "not": 1962,
+ "Ġindex": 1963,
+ "her": 1964,
+ "icon": 1965,
+ "On": 1966,
+ ";čĊčĊ": 1967,
+ "ivity": 1968,
+ "mand": 1969,
+ ".Windows": 1970,
+ "OL": 1971,
+ "Ġreal": 1972,
+ "Ġmax": 1973,
+ "land": 1974,
+ "....": 1975,
+ "raph": 1976,
+ "Ġbuild": 1977,
+ "leg": 1978,
+ "assword": 1979,
+ "?ĊĊ": 1980,
+ "â̦": 1981,
+ "ook": 1982,
+ "uck": 1983,
+ "Ġmessage": 1984,
+ "test": 1985,
+ "ivers": 1986,
+ "38": 1987,
+ "Ġinput": 1988,
+ "Ġart": 1989,
+ "Ġbetween": 1990,
+ "Get": 1991,
+ "enter": 1992,
+ "ground": 1993,
+ "ene": 1994,
+ "á": 1995,
+ ".length": 1996,
+ "Node": 1997,
+ "(i": 1998,
+ "Class": 1999,
+ "for": 2000,
+ "ĠâĢĶ": 2001,
+ "ten": 2002,
+ "oin": 2003,
+ "Ġke": 2004,
+ "ui": 2005,
+ "ĠIN": 2006,
+ "Ġtable": 2007,
+ "sub": 2008,
+ "ĠLe": 2009,
+ "Ġhead": 2010,
+ "Ġmust": 2011,
+ "////////////////": 2012,
+ ".util": 2013,
+ "Context": 2014,
+ "Ġorder": 2015,
+ "Ġmov": 2016,
+ "over": 2017,
+ "Ġcontin": 2018,
+ "Ġsay": 2019,
+ "static": 2020,
+ ".Text": 2021,
+ "ĠclassName": 2022,
+ "pany": 2023,
+ "Ġter": 2024,
+ "head": 2025,
+ "rg": 2026,
+ "Ġproduct": 2027,
+ "This": 2028,
+ ".âĢĿ": 2029,
+ "ĠBut": 2030,
+ "70": 2031,
+ "loy": 2032,
+ "Ġdouble": 2033,
+ "sg": 2034,
+ "Ġplace": 2035,
+ ".x": 2036,
+ "message": 2037,
+ "Ġinformation": 2038,
+ "private": 2039,
+ "Ġoper": 2040,
+ "ced": 2041,
+ "db": 2042,
+ "\">": 2043,
+ "Param": 2044,
+ "icle": 2045,
+ "Ġweek": 2046,
+ "Ġprop": 2047,
+ "table": 2048,
+ "idget": 2049,
+ "place": 2050,
+ "Prop": 2051,
+ "ĠAll": 2052,
+ "els": 2053,
+ "box": 2054,
+ ".ĊĊĊĊ": 2055,
+ ".R": 2056,
+ "ĠTo": 2057,
+ "iter": 2058,
+ "Sh": 2059,
+ "uration": 2060,
+ "older": 2061,
+ "_list": 2062,
+ "come": 2063,
+ "Ġsw": 2064,
+ "ization": 2065,
+ "ĉfor": 2066,
+ "bl": 2067,
+ "Ġprogram": 2068,
+ "(e": 2069,
+ "ape": 2070,
+ "check": 2071,
+ ".Forms": 2072,
+ "Ġund": 2073,
+ "ategory": 2074,
+ "75": 2075,
+ "ags": 2076,
+ "Ġresponse": 2077,
+ "US": 2078,
+ "request": 2079,
+ "Ġstruct": 2080,
+ "escription": 2081,
+ "Ġcode": 2082,
+ "_H": 2083,
+ "uffer": 2084,
+ "Ġwithout": 2085,
+ "lobal": 2086,
+ "Manager": 2087,
+ "ilter": 2088,
+ "PO": 2089,
+ "ĉthis": 2090,
+ "option": 2091,
+ "Ġsol": 2092,
+ "Ġ===": 2093,
+ "akes": 2094,
+ "Controller": 2095,
+ "44": 2096,
+ "Message": 2097,
+ "Ġref": 2098,
+ "ever": 2099,
+ "ĠSo": 2100,
+ "aining": 2101,
+ ".append": 2102,
+ "Ġstill": 2103,
+ "Ġprovid": 2104,
+ "Ġassert": 2105,
+ "med": 2106,
+ "Ġcap": 2107,
+ "usiness": 2108,
+ "Ġrep": 2109,
+ "tings": 2110,
+ "ved": 2111,
+ ".N": 2112,
+ "api": 2113,
+ "OD": 2114,
+ "Ġfield": 2115,
+ "iven": 2116,
+ "oto": 2117,
+ "âĢľ": 2118,
+ "col": 2119,
+ "(x": 2120,
+ "ght": 2121,
+ "Result": 2122,
+ "Code": 2123,
+ ".is": 2124,
+ "link": 2125,
+ "Ġcour": 2126,
+ "An": 2127,
+ "Ġteam": 2128,
+ "ĉint": 2129,
+ "ift": 2130,
+ "55": 2131,
+ "Ġsecond": 2132,
+ "Ġgoing": 2133,
+ "Ġrange": 2134,
+ "_E": 2135,
+ "ness": 2136,
+ "39": 2137,
+ "Ġfam": 2138,
+ "Ġnil": 2139,
+ "ĠCont": 2140,
+ "ailable": 2141,
+ "utes": 2142,
+ "atab": 2143,
+ "Ġfact": 2144,
+ "Ġvis": 2145,
+ "(&": 2146,
+ "ĠAN": 2147,
+ "31": 2148,
+ "Al": 2149,
+ "title": 2150,
+ "Ġandroid": 2151,
+ "CE": 2152,
+ "\\\"": 2153,
+ "irt": 2154,
+ "Ġwrit": 2155,
+ "н": 2156,
+ "ĉm": 2157,
+ "ftware": 2158,
+ "ond": 2159,
+ "Ġret": 2160,
+ "osition": 2161,
+ "Ġhome": 2162,
+ "Ġleft": 2163,
+ "args": 2164,
+ "meric": 2165,
+ "48": 2166,
+ "Ġdirect": 2167,
+ "oci": 2168,
+ "Pl": 2169,
+ "As": 2170,
+ "ret": 2171,
+ "ado": 2172,
+ "Of": 2173,
+ "chn": 2174,
+ "ĠGet": 2175,
+ "ee": 2176,
+ "ross": 2177,
+ "();": 2178,
+ "____": 2179,
+ ".ph": 2180,
+ "It": 2181,
+ "oute": 2182,
+ "Ġexper": 2183,
+ "chool": 2184,
+ "www": 2185,
+ "},": 2186,
+ "Ġallow": 2187,
+ "ĠÂ": 2188,
+ "())": 2189,
+ "size": 2190,
+ "ism": 2191,
+ "ai": 2192,
+ "tract": 2193,
+ "ane": 2194,
+ "...ĊĊ": 2195,
+ "context": 2196,
+ "Ġbeg": 2197,
+ "CH": 2198,
+ "Ġpage": 2199,
+ "hip": 2200,
+ "no": 2201,
+ "core": 2202,
+ "sp": 2203,
+ "Ġdifferent": 2204,
+ "iable": 2205,
+ "ĠMe": 2206,
+ "_IN": 2207,
+ "button": 2208,
+ "ĠIs": 2209,
+ "ervices": 2210,
+ "Ġca": 2211,
+ "Ġaround": 2212,
+ "App": 2213,
+ "ration": 2214,
+ "Ġrece": 2215,
+ "Ġreally": 2216,
+ "Ġimage": 2217,
+ "Ġtarget": 2218,
+ "Ġdep": 2219,
+ "opyright": 2220,
+ "tra": 2221,
+ "ingle": 2222,
+ "ital": 2223,
+ "Layout": 2224,
+ "Ġboth": 2225,
+ "Override": 2226,
+ "arm": 2227,
+ "=>": 2228,
+ "aterial": 2229,
+ "iled": 2230,
+ "Ġput": 2231,
+ "Qu": 2232,
+ "ÑĢ": 2233,
+ "ung": 2234,
+ "map": 2235,
+ "ĉĉĉĉĉĉĉĉ": 2236,
+ "Ġlevel": 2237,
+ "Component": 2238,
+ "book": 2239,
+ "creen": 2240,
+ "_RE": 2241,
+ "Ġconfig": 2242,
+ "ãģ": 2243,
+ "Or": 2244,
+ ".data": 2245,
+ "Ġdocument": 2246,
+ "\",\"": 2247,
+ "tribute": 2248,
+ "ux": 2249,
+ "Log": 2250,
+ "ference": 2251,
+ "post": 2252,
+ "_e": 2253,
+ "Ġlocal": 2254,
+ "andom": 2255,
+ "assert": 2256,
+ "Val": 2257,
+ "lected": 2258,
+ "ina": 2259,
+ "atabase": 2260,
+ "Add": 2261,
+ "Ġcontent": 2262,
+ ".print": 2263,
+ "signed": 2264,
+ "ric": 2265,
+ ".\"ĊĊ": 2266,
+ "Ġfa": 2267,
+ "!ĊĊ": 2268,
+ "-f": 2269,
+ "ived": 2270,
+ "Ġquest": 2271,
+ ".ex": 2272,
+ "Ġfloat": 2273,
+ "Ġdevelop": 2274,
+ "оÐ": 2275,
+ "Map": 2276,
+ "ading": 2277,
+ "Ġposs": 2278,
+ "UE": 2279,
+ "namespace": 2280,
+ "_O": 2281,
+ "ĉb": 2282,
+ ".Get": 2283,
+ ">(": 2284,
+ "json": 2285,
+ "etails": 2286,
+ "66": 2287,
+ "Ġtoo": 2288,
+ "Ġextends": 2289,
+ "ĠNone": 2290,
+ "Ġfore": 2291,
+ "(String": 2292,
+ "format": 2293,
+ "Ġgreat": 2294,
+ "inter": 2295,
+ "cale": 2296,
+ "Ñģ": 2297,
+ "ron": 2298,
+ "iving": 2299,
+ "Ent": 2300,
+ "ency": 2301,
+ "xt": 2302,
+ "oy": 2303,
+ "05": 2304,
+ "Ġmonth": 2305,
+ "Ġhapp": 2306,
+ "Ġsuper": 2307,
+ "bar": 2308,
+ "default": 2309,
+ "_de": 2310,
+ "ords": 2311,
+ "ln": 2312,
+ "({Ċ": 2313,
+ "ĠInd": 2314,
+ "ases": 2315,
+ "Ġtitle": 2316,
+ "Ġcontext": 2317,
+ "08": 2318,
+ "oh": 2319,
+ "-p": 2320,
+ "Em": 2321,
+ "Ġmet": 2322,
+ "Test": 2323,
+ "Ġlife": 2324,
+ "_v": 2325,
+ "ĠUS": 2326,
+ "UI": 2327,
+ "ocation": 2328,
+ "md": 2329,
+ "Ġ[Ċ": 2330,
+ "Ġ]": 2331,
+ "sw": 2332,
+ "Ġincre": 2333,
+ "script": 2334,
+ "ential": 2335,
+ "ways": 2336,
+ ".de": 2337,
+ "Ġsrc": 2338,
+ "Ġcatch": 2339,
+ "ĠAmeric": 2340,
+ "//Ċ": 2341,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 2342,
+ "Ġpay": 2343,
+ "plit": 2344,
+ "âĢĶ": 2345,
+ "Ġcoun": 2346,
+ "obj": 2347,
+ ".php": 2348,
+ "Ġchange": 2349,
+ "ething": 2350,
+ "'re": 2351,
+ "aster": 2352,
+ "los": 2353,
+ "lation": 2354,
+ "ĠĠĊ": 2355,
+ "Le": 2356,
+ "ä": 2357,
+ "({": 2358,
+ "ready": 2359,
+ "ĠNo": 2360,
+ "Ġposition": 2361,
+ "Ġold": 2362,
+ "Ġbook": 2363,
+ "abled": 2364,
+ "bug": 2365,
+ "202": 2366,
+ "Hand": 2367,
+ "};ĊĊ": 2368,
+ "isplay": 2369,
+ "aving": 2370,
+ "04": 2371,
+ "Ġgover": 2372,
+ "Ġversion": 2373,
+ "System": 2374,
+ "nect": 2375,
+ "response": 2376,
+ "Style": 2377,
+ "Up": 2378,
+ "angu": 2379,
+ "Ġthree": 2380,
+ "init": 2381,
+ "ero": 2382,
+ "Ġlaw": 2383,
+ "endif": 2384,
+ "Ġbase": 2385,
+ "email": 2386,
+ "(l": 2387,
+ "_V": 2388,
+ "Ġconf": 2389,
+ "ATE": 2390,
+ "Ġduring": 2391,
+ "tes": 2392,
+ "Ġconsole": 2393,
+ "ĠPr": 2394,
+ "Ġspe": 2395,
+ "ves": 2396,
+ "65": 2397,
+ "path": 2398,
+ "ialog": 2399,
+ "dition": 2400,
+ "_to": 2401,
+ "ards": 2402,
+ "Ġagainst": 2403,
+ "etwork": 2404,
+ "ĠPh": 2405,
+ "_L": 2406,
+ "cur": 2407,
+ "imit": 2408,
+ "With": 2409,
+ "Ġpower": 2410,
+ "ium": 2411,
+ "';ĊĊ": 2412,
+ "Ġwom": 2413,
+ "left": 2414,
+ "ources": 2415,
+ "atri": 2416,
+ "ĠIm": 2417,
+ "ĠMan": 2418,
+ "orth": 2419,
+ "${": 2420,
+ "88": 2421,
+ "quals": 2422,
+ "ese": 2423,
+ "_size": 2424,
+ "Ġiss": 2425,
+ "otal": 2426,
+ "-g": 2427,
+ "ique": 2428,
+ "rame": 2429,
+ "Ġwidth": 2430,
+ "erg": 2431,
+ ")(": 2432,
+ "ittle": 2433,
+ "TR": 2434,
+ "ĠThey": 2435,
+ "ences": 2436,
+ "02": 2437,
+ "rl": 2438,
+ "ons": 2439,
+ "Ġlabel": 2440,
+ ".y": 2441,
+ "-t": 2442,
+ "update": 2443,
+ "anel": 2444,
+ "sc": 2445,
+ ".to": 2446,
+ "Ġproject": 2447,
+ "ü": 2448,
+ "Ġelement": 2449,
+ "Ġsuccess": 2450,
+ "ĉĉĊ": 2451,
+ ".sh": 2452,
+ "ram": 2453,
+ "ched": 2454,
+ "())Ċ": 2455,
+ "Ġ(Ċ": 2456,
+ "Ġdate": 2457,
+ "Ġtot": 2458,
+ "_ST": 2459,
+ "All": 2460,
+ "ification": 2461,
+ "ĉvar": 2462,
+ "Ġtri": 2463,
+ "chem": 2464,
+ "my": 2465,
+ "Ġbig": 2466,
+ "ĠAd": 2467,
+ "ĠAt": 2468,
+ "ots": 2469,
+ "num": 2470,
+ "Act": 2471,
+ "Ġmap": 2472,
+ "era": 2473,
+ "cope": 2474,
+ ".$": 2475,
+ ",âĢĿ": 2476,
+ "Ġpop": 2477,
+ "Ġfew": 2478,
+ "Ġlen": 2479,
+ "uid": 2480,
+ "eters": 2481,
+ "ules": 2482,
+ "ÃŃ": 2483,
+ "source": 2484,
+ "https": 2485,
+ "Ġdem": 2486,
+ "Ġear": 2487,
+ "################": 2488,
+ "Ġmatch": 2489,
+ "ories": 2490,
+ "49": 2491,
+ "aces": 2492,
+ "ĠCl": 2493,
+ "Ġnode": 2494,
+ "78": 2495,
+ "irc": 2496,
+ "local": 2497,
+ "unity": 2498,
+ "};Ċ": 2499,
+ "Ġanother": 2500,
+ "<<": 2501,
+ "ogle": 2502,
+ "Ġsit": 2503,
+ "ework": 2504,
+ "TE": 2505,
+ ".I": 2506,
+ "NS": 2507,
+ "ology": 2508,
+ "ought": 2509,
+ ".Cont": 2510,
+ ">>": 2511,
+ "Ġcare": 2512,
+ "state": 2513,
+ "ĉprivate": 2514,
+ "Ġeffect": 2515,
+ "++)": 2516,
+ "_file": 2517,
+ "ending": 2518,
+ "Line": 2519,
+ "For": 2520,
+ "ior": 2521,
+ "ĠSc": 2522,
+ "Ġfun": 2523,
+ ".Size": 2524,
+ "ĉelse": 2525,
+ "])": 2526,
+ "start": 2527,
+ "vious": 2528,
+ "Ġ},": 2529,
+ "ours": 2530,
+ "Ġleg": 2531,
+ "Ġservice": 2532,
+ "Ġsince": 2533,
+ "iron": 2534,
+ "Label": 2535,
+ "Ġnon": 2536,
+ "Ġlos": 2537,
+ "iction": 2538,
+ "Ġfull": 2539,
+ "acter": 2540,
+ "board": 2541,
+ "gress": 2542,
+ "Ġturn": 2543,
+ "ither": 2544,
+ "09": 2545,
+ ".size": 2546,
+ "Ġbody": 2547,
+ "resh": 2548,
+ "eturn": 2549,
+ "199": 2550,
+ "(_": 2551,
+ "yles": 2552,
+ "ormal": 2553,
+ "pi": 2554,
+ "Ġsomething": 2555,
+ "!--": 2556,
+ "uint": 2557,
+ "Ġprodu": 2558,
+ "Ġstand": 2559,
+ "Ġproble": 2560,
+ "Ġavailable": 2561,
+ "mt": 2562,
+ "ĠBl": 2563,
+ "Ġ...": 2564,
+ "Ġblock": 2565,
+ "Input": 2566,
+ "Ġkeep": 2567,
+ "Count": 2568,
+ "open": 2569,
+ "Ġ['": 2570,
+ "Ġthrow": 2571,
+ "uilder": 2572,
+ "Action": 2573,
+ "Ġthings": 2574,
+ "True": 2575,
+ "Ġurl": 2576,
+ "ĠBo": 2577,
+ "printf": 2578,
+ "Ġred": 2579,
+ "js": 2580,
+ ".create": 2581,
+ "ĠOr": 2582,
+ "Status": 2583,
+ "Instance": 2584,
+ "Ġcontrol": 2585,
+ "Ġcome": 2586,
+ "Ġcustom": 2587,
+ "location": 2588,
+ "07": 2589,
+ "model": 2590,
+ "ĠčĊ": 2591,
+ "Ġsource": 2592,
+ "Ġeas": 2593,
+ ".out": 2594,
+ "]ĊĊ": 2595,
+ "oney": 2596,
+ "Ġawait": 2597,
+ "Ġpartic": 2598,
+ "AP": 2599,
+ "ublish": 2600,
+ "odes": 2601,
+ "_pro": 2602,
+ "ply": 2603,
+ "riter": 2604,
+ "Ġprov": 2605,
+ "Ġmill": 2606,
+ "HT": 2607,
+ "])Ċ": 2608,
+ "Ġchang": 2609,
+ "Ġask": 2610,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 2611,
+ "Ġoutput": 2612,
+ "Ġemail": 2613,
+ "68": 2614,
+ ".push": 2615,
+ "Ġ}čĊčĊ": 2616,
+ "ination": 2617,
+ "47": 2618,
+ "atrix": 2619,
+ "Table": 2620,
+ "uccess": 2621,
+ "]);Ċ": 2622,
+ "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 2623,
+ "Ġdisc": 2624,
+ "([": 2625,
+ "Ġbusiness": 2626,
+ "height": 2627,
+ ".html": 2628,
+ "ta": 2629,
+ "field": 2630,
+ "Ġrequired": 2631,
+ "_R": 2632,
+ "Ġgovern": 2633,
+ "}čĊčĊ": 2634,
+ "lex": 2635,
+ "500": 2636,
+ ".,": 2637,
+ "ĠSet": 2638,
+ "urch": 2639,
+ "///": 2640,
+ "ts": 2641,
+ "af": 2642,
+ "Ġmight": 2643,
+ "istory": 2644,
+ "Str": 2645,
+ "Ġnever": 2646,
+ "Response": 2647,
+ "arse": 2648,
+ "ada": 2649,
+ "ĠHow": 2650,
+ "Ġ*)": 2651,
+ "Ġ;": 2652,
+ "Ġhard": 2653,
+ "Ad": 2654,
+ "Ġintern": 2655,
+ "used": 2656,
+ "(data": 2657,
+ "mod": 2658,
+ "annel": 2659,
+ "Ġnp": 2660,
+ "ugg": 2661,
+ "Ġ/>Ċ": 2662,
+ "Ġcalled": 2663,
+ "body": 2664,
+ "Ġcho": 2665,
+ "(r": 2666,
+ "_set": 2667,
+ "ird": 2668,
+ "Ġ>=": 2669,
+ "Ġ};Ċ": 2670,
+ "Ġoptions": 2671,
+ "ĠGener": 2672,
+ "Ġheight": 2673,
+ "Point": 2674,
+ "You": 2675,
+ "ety": 2676,
+ "Click": 2677,
+ "Ġsmall": 2678,
+ "Ġide": 2679,
+ "Ġaccess": 2680,
+ "anguage": 2681,
+ "Ġprotected": 2682,
+ "Ġjob": 2683,
+ "ĠThere": 2684,
+ "Def": 2685,
+ "Ġaddress": 2686,
+ "Ġuint": 2687,
+ "Not": 2688,
+ "oo": 2689,
+ "aps": 2690,
+ "